repo_name
stringlengths
6
130
hexsha
sequence
file_path
sequence
code
sequence
apis
sequence
possible_versions
list
alemazzeo/arduscope
[ "9671c49fb22eacd8575f80366fb95d0a8f5b33a9" ]
[ "arduscope/arduscope.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom __future__ import annotations\n\nimport calendar\nimport json\nimport os\nimport pathlib\nimport threading\nimport time\nfrom collections import deque\nfrom dataclasses import dataclass, field, asdict\nfrom tqdm import tqdm\nfrom typing import List, Dict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom serial import Serial\n\nBUFFER = 480\nMAX_FREQ = 32000\nREAL_MAX_FREQ = 20000\nMAX_PULSE_WIDTH = 32767\nBAUDRATE = 115200\nOFFSET_TIME = 0.000054554\n\n\nARDUINO_PARAMS = [\n \"limit\",\n \"frequency\",\n \"reference\",\n \"trigger\",\n \"trigger_channel\",\n \"trigger_offset\",\n \"trigger_tol\",\n \"channels\",\n \"adc_prescaler\",\n \"pulse_width\",\n]\n\n\n@dataclass\nclass ArduscopeMeasure:\n acquire_time: float\n frequency: int\n pulse_width: float\n trigger_value: float\n amplitude: float\n n_channels: int\n trigger_channel: str\n trigger_offset: float\n\n x: List[np.ndarray] = field(init=False)\n channels: List[np.ndarray] = list\n version: str = \"0.3.1\"\n\n def __post_init__(self):\n self.acquire_time = float(self.acquire_time)\n self.frequency = int(self.frequency)\n self.pulse_width = float(self.pulse_width)\n self.trigger_value = float(self.trigger_value)\n self.amplitude = float(self.amplitude)\n self.n_channels = int(self.n_channels)\n self.trigger_channel = str(self.trigger_channel)\n self.trigger_offset = float(self.trigger_offset)\n base_x = np.arange(BUFFER // self.n_channels) / self.frequency\n self.x = [\n base_x + OFFSET_TIME * i\n for i in range(self.n_channels)\n ]\n\n def save(self, file: [str, os.PathLike], overwrite: bool = False):\n \"\"\" Saves a screen into a file (csv, npz or json)\n\n Parameters\n ----------\n file : str or os.PathLike\n A filename with desired format in extension (.csv, .npz or .json)\n overwrite : bool\n Indicates if the file is overwrite on exists case\n \"\"\"\n if isinstance(file, (str, os.PathLike)):\n filename = pathlib.Path(file).absolute()\n else:\n raise 
TypeError\n\n if overwrite is False and filename.exists():\n raise FileExistsError\n\n as_dict: dict = asdict(self)\n\n if filename.suffix == \".json\":\n with open(filename, mode=\"w\") as f:\n as_dict.pop(\"x\")\n as_dict[\"channels\"] = [\n channel.tolist()\n for channel in self.channels\n ]\n json.dump(as_dict, f)\n elif filename.suffix == \".npz\":\n as_dict.pop(\"x\")\n np.savez(filename, **as_dict)\n elif filename.suffix == \".csv\":\n as_dict[\"acquire_time\"] = time.strftime(\n \"%d/%m/%Y %H:%M:%S\",\n time.gmtime(self.acquire_time)\n )\n header = \"\\n\".join([\n f\"# {key} = {value}\"\n for key, value in as_dict.items()\n if key not in [\"x\", \"channels\"]\n ])\n with open(filename, mode=\"w\") as f:\n f.write(header)\n f.write(\"\\n\")\n for i in range(self.channels[0].shape[0]):\n f.write(f\"\\n### Screen {i} of {self.channels[0].shape[0]}\\n\")\n screen = [\n channel[i, :]\n for channel in self.channels\n ]\n data = np.append(self.x, screen, axis=0).T\n np.savetxt(f, data, fmt=\"%.9e\")\n f.write(\"### End of screen\\n\")\n else:\n raise ValueError\n\n @classmethod\n def load(cls, file: [str, os.PathLike]) -> ArduscopeMeasure:\n \"\"\" Loads a screen from a file (csv, npz or json)\n\n Parameters\n ----------\n file : str or os.PathLike\n A filename with valid extension (.csv, .npz or .json)\n\n Returns\n -------\n ArduscopeScreen instance with loaded data\n \"\"\"\n if isinstance(file, (str, os.PathLike)):\n filename = pathlib.Path(file).absolute()\n else:\n raise TypeError\n\n if not filename.exists():\n raise FileNotFoundError\n\n if filename.suffix == \".json\":\n with open(filename, mode=\"r\") as f:\n data = json.load(f)\n data[\"channels\"] = [\n np.array(ch)\n for ch in data[\"channels\"]\n ]\n return cls(**data)\n elif filename.suffix == \".npz\":\n data = np.load(str(filename))\n return cls(**data)\n elif filename.suffix == \".csv\":\n as_dict = {}\n with open(filename, mode=\"r\") as f:\n line = f.readline().strip()\n while 
line.startswith(\"#\"):\n split = line.replace(\"#\", \"\").split(\"=\")\n if len(split) == 2:\n key, value = split[0].strip(), split[1].strip()\n as_dict[key] = value\n line = f.readline().strip()\n\n as_dict[\"acquire_time\"] = calendar.timegm(\n time.strptime(as_dict[\"acquire_time\"], \"%d/%m/%Y %H:%M:%S\")\n )\n data = np.loadtxt(str(filename))\n n = data.shape[0] // (BUFFER // int(as_dict[\"n_channels\"]))\n as_dict[\"channels\"] = [\n np.asarray(np.array_split(data[:, i+1], n))\n for i in range(int(as_dict[\"n_channels\"]))\n ]\n return cls(**as_dict)\n else:\n raise ValueError\n\n\nclass Arduscope:\n _open_ports: Dict[str, Arduscope] = {}\n\n def __init__(self, port: str, deque_max_size: int = 100):\n \"\"\"\n Parameters\n ----------\n port : str\n Connection port of Arduino, like \"COM1\" or \"/dev/ttyUS0\"\n deque_max_size : int\n Max size of screen buffer (a double-ended queue)\n \"\"\"\n if not isinstance(port, str):\n raise TypeError\n\n self._port = port\n self._baudrate = BAUDRATE\n\n self._serial = self._open_serial()\n\n self._capture_parameters = None\n\n self._freq = None\n self._pulse_width = None\n self._amplitude = None\n self._ref = None\n self._n_channels = None\n self._trigger_value = None\n self._trigger_channel = None\n self._trigger_channel_code = None\n self._trigger_tol = None\n self._trigger_offset = None\n self._adc_prescaler = 4\n self._ref_values = {\"5.0\": 0, \"1.1\": 1}\n\n self._measure_params = None\n self._data_buffer = deque(maxlen=deque_max_size)\n\n self._daemon = None\n self._running = threading.Event()\n self._screen_ready = threading.Event()\n\n self._uptime = time.time()\n\n self.frequency = 200\n self.pulse_width = 0.1\n self.trigger_value = 2.5\n self.amplitude = 5.0\n self.n_channels = 2\n self.trigger_channel = \"A0\"\n self.trigger_offset = 0.05\n self._trigger_tol = 5\n\n self._live_mode_on = False\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n 
def _open_serial(self) -> Serial:\n \"\"\"\n Opens a serial port between Arduino and Python\n\n Returns\n -------\n Serial (from PySerial library)\n \"\"\"\n\n if self._port in Arduscope._open_ports.keys():\n print(f\"Closing other Arduscope instances in port {self._port}...\")\n other_arduscope = Arduscope._open_ports[self._port]\n try:\n other_arduscope.stop_acquire()\n other_arduscope._serial.close()\n except AttributeError:\n pass\n\n Arduscope._open_ports.update({\n self._port: self\n })\n\n serial = Serial(port=self._port, baudrate=self._baudrate, timeout=1)\n msg = \"\"\n start_time = time.time()\n while msg != \"BOOTED\\r\\n\":\n try:\n msg = serial.readline().decode('utf-8')\n except UnicodeDecodeError:\n pass\n if time.time() - start_time > 5:\n raise TimeoutError(\"Arduino is not responding\")\n return serial\n\n @property\n def uptime(self) -> float:\n \"\"\" Uptime of Arduscope object creation\"\"\"\n return time.time() - self._uptime\n\n @property\n def x(self) -> List[np.ndarray]:\n \"\"\" Time-array for x axes representation \"\"\"\n base_x = np.arange(BUFFER // self.n_channels) / self.frequency\n return [\n base_x + OFFSET_TIME * i\n for i in range(self._n_channels)\n ]\n\n @property\n def channels(self) -> List[np.ndarray]:\n return [\n np.asarray([channels[i] for channels in self._data_buffer])\n for i in range(self._n_channels)\n ]\n\n @property\n def frequency(self) -> int:\n \"\"\" Frequency of sampling (in Hz) \"\"\"\n return self._freq\n\n @frequency.setter\n def frequency(self, value: int):\n if 1 <= value <= MAX_FREQ:\n self._freq = int(value)\n else:\n raise ValueError(f\"MIN: 1, MAX: {MAX_FREQ}\")\n self._on_property_change()\n\n @property\n def pulse_width(self) -> float:\n \"\"\" Output pulse width in PIN7 (in seconds) \"\"\"\n return self._pulse_width * 0.001\n\n @pulse_width.setter\n def pulse_width(self, value: float):\n if 0.002 <= value <= MAX_PULSE_WIDTH / 1000.0:\n self._pulse_width = int(value * 1000)\n else:\n raise 
ValueError(f\"MIN: 0.002, MAX: {MAX_PULSE_WIDTH / 1000.0}\")\n self._on_property_change()\n\n @property\n def amplitude(self) -> float:\n \"\"\" Max amplitude measured (in Volts) \"\"\"\n return self._amplitude\n\n @amplitude.setter\n def amplitude(self, value: float):\n if 0 < value <= 1.1:\n self._ref = \"1.1\"\n self._amplitude = value\n elif 1.1 <= value <= 5.0:\n self._ref = \"5.0\"\n self._amplitude = value\n else:\n raise ValueError(\"0.0 < value <= 5.0\")\n self._on_property_change()\n\n @property\n def trigger_value(self) -> float:\n \"\"\" Trigger value (in Volts) \"\"\"\n return self._trigger_value\n\n @trigger_value.setter\n def trigger_value(self, value: float):\n if 0 < value < 5.0:\n self._trigger_value = value\n else:\n raise ValueError(\"MIN: 0, MAX: 5.0\")\n self._on_property_change()\n\n @property\n def trigger_channel(self) -> str:\n \"\"\" Trigger channel\n\n Posible values:\n - \"A0\" to \"A6\" -> Analog inputs\n - \"D7OUT_HIGH\" -> When PIN7 changes to HIGH state\n - \"D7OUT_LOW\" -> When PIN7 changes to LOW state\n - \"REPEAT\" -> Immediately after transmitting the last measurement\n \"\"\"\n return self._trigger_channel\n\n @trigger_channel.setter\n def trigger_channel(self, value: str):\n if isinstance(value, str):\n if value.upper().startswith(\"A\") and len(value) == 2:\n self._trigger_channel_code = int(value[1])\n elif value.upper() == \"D7OUT_HIGH\":\n self._trigger_channel_code = -1\n elif value.upper() == \"D7OUT_LOW\":\n self._trigger_channel_code = -2\n elif value.upper() == \"REPEAT\":\n self._trigger_channel_code = -3\n else:\n raise ValueError(\"Posible values: \"\n '\"A0\", \"A1\", \"A2\", \"A3\", \"A4\", \"A5\", \"A6\", '\n '\"D7OUT_HIGH\", \"D7OUT_LOW\", \"REPEAT\"')\n else:\n raise TypeError(\"Posible values: \"\n '\"A0\", \"A1\", \"A2\", \"A3\", \"A4\", \"A5\", \"A6\", '\n '\"D7OUT_HIGH\", \"D7OUT_LOW\", \"REPEAT\"')\n self._trigger_channel = value\n self._on_property_change()\n\n @property\n def trigger_offset(self) -> 
float:\n \"\"\" Trigger offset in screen fraction (-1.0 to 1.0) \"\"\"\n return self._trigger_offset\n\n @trigger_offset.setter\n def trigger_offset(self, value: float):\n if isinstance(value, (int, float)):\n if -1.0 <= value <= 1.0:\n self._trigger_offset = value\n else:\n raise ValueError(\"MIN: -1.0, MAX: 1.0\")\n else:\n raise TypeError(\"Expected <float>, MIN: -1.0, MAX: 1.0\")\n self._on_property_change()\n\n @property\n def n_channels(self) -> int:\n \"\"\" Number of channels (1 to 6 available)\"\"\"\n return self._n_channels\n\n @n_channels.setter\n def n_channels(self, value: int):\n if 1 <= value <= 6:\n self._n_channels = int(value)\n else:\n raise ValueError(\"MIN: 1, MAX: 6\")\n self._on_property_change()\n\n @property\n def factor(self) -> float:\n \"\"\" Conversion factor given by Arduino Reference and bit depth (10 bits)\"\"\"\n if self._ref == \"5.0\":\n return 1024 / 5.0\n elif self._ref == \"1.1\":\n return 1024 / 1.1\n\n @property\n def measure(self) -> ArduscopeMeasure:\n \"\"\" An ArduscopeMeasure object with measurement params and channel data\"\"\"\n return ArduscopeMeasure(channels=self.channels, **self._measure_params)\n\n def start_acquire(self):\n \"\"\" Starts acquire in background (clearing previous state) \"\"\"\n if self._serial.isOpen() is False:\n self._serial = self._open_serial()\n parameters = {\n \"limit\": 0,\n \"frequency\": self._freq,\n \"reference\": self._ref_values[self._ref],\n \"trigger\": self._trigger_value * self.factor,\n \"trigger_channel\": self._trigger_channel_code,\n \"trigger_offset\": int((BUFFER // self._n_channels) * self._trigger_offset),\n \"trigger_tol\": self._trigger_tol,\n \"channels\": self._n_channels,\n \"adc_prescaler\": self._adc_prescaler,\n \"pulse_width\": self._pulse_width // 2\n }\n\n if self._ref == \"1.1\":\n if self._trigger_value > 1.1 and self._trigger_channel_code >= 0:\n raise ValueError(f\"Trigger value {self._trigger_value}V \"\n f\"greater than maximum amplitude 
{self._ref}V.\")\n\n if self._freq > REAL_MAX_FREQ / self._n_channels:\n print(f\"\\n*** WARNING ***\"\n f\"\\nMAXIMUM RECOMMENDED FREQUENCY FOR {self._n_channels} CHANNELS \"\n f\"IS {int(REAL_MAX_FREQ / self._n_channels):d}Hz.\\n\"\n f\"***\\n\")\n\n self._measure_params = {\n \"acquire_time\": time.time(),\n \"frequency\": self.frequency,\n \"pulse_width\": self.pulse_width,\n \"trigger_value\": self.trigger_value,\n \"amplitude\": self.amplitude,\n \"n_channels\": self.n_channels,\n \"trigger_channel\": self.trigger_channel,\n \"trigger_offset\": self.trigger_offset\n }\n\n typed_array = np.asarray(\n [parameters[x] for x in ARDUINO_PARAMS],\n dtype=np.int16\n )\n\n for param in typed_array:\n self._serial.write(int(param).to_bytes(2, byteorder=\"little\", signed=True))\n\n if self._daemon is not None:\n if self._daemon.is_alive():\n self._running.clear()\n self._daemon.join()\n\n self._running.set()\n self._screen_ready.clear()\n self._data_buffer.clear()\n self._daemon = threading.Thread(target=self._acquire_daemon, daemon=True)\n self._daemon.start()\n self._screen_ready.wait()\n\n def clear_buffer(self):\n self._data_buffer.clear()\n\n def wait_signal(self):\n \"\"\" Stops execution until screen buffer has at least one measurement\"\"\"\n self._screen_ready.wait()\n\n def wait_until(self, n_screens: int, timeout: float = None):\n \"\"\" Stops execution until screen buffer has at least <n_screen>\n\n Parameters\n ----------\n n_screens : int\n Number of screens required\n timeout : float\n Timeout in seconds (raises a TimeoutError exception)\n\n \"\"\"\n if isinstance(n_screens, int):\n if n_screens > self._data_buffer.maxlen:\n raise ValueError(f\"0 < n_screens < {self._data_buffer.maxlen}\")\n else:\n raise TypeError(f\"0 < n_screens < {self._data_buffer.maxlen}\")\n\n if timeout is not None:\n if not isinstance(timeout, (int, float)):\n raise TypeError(\"Timeout type: float\")\n\n start = time.time()\n current_screens = len(self._data_buffer)\n if 
current_screens < n_screens:\n with tqdm(\n total=n_screens,\n miniters=1,\n initial=current_screens,\n ncols=80,\n bar_format=\"{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt}\"\n ) as pb:\n pb.set_description(\"Waiting for requested screens\")\n while current_screens < n_screens:\n if timeout is not None:\n if time.time() - start > timeout:\n raise TimeoutError()\n pb.update(current_screens - pb.n)\n current_screens = len(self._data_buffer)\n pb.update(n_screens - pb.n)\n print()\n\n def stop_acquire(self):\n \"\"\" Stops acquire without clearing the buffer \"\"\"\n if self._running.isSet():\n self._running.clear()\n if self._daemon is not None:\n if self._daemon.is_alive():\n self._running.clear()\n self._daemon.join()\n\n self._serial.close()\n Arduscope._open_ports.pop(self._port, None)\n\n def close(self):\n self.stop_acquire()\n\n def _on_property_change(self):\n \"\"\" Handles the properties changes resetting acquisition\"\"\"\n self._data_buffer.clear()\n if self._running.isSet():\n self.stop_acquire()\n self.start_acquire()\n\n def _acquire_daemon(self):\n \"\"\" Background daemon that performs the buffer read \"\"\"\n while self._running.isSet():\n if self._serial.inWaiting() >= BUFFER * 2 + 2:\n channels = self._read_buffer()\n self._data_buffer.append(channels)\n self._screen_ready.set()\n\n def _read_buffer(self) -> List[np.ndarray]:\n \"\"\" Private function for buffer reading and conversion \"\"\"\n\n if self._serial.inWaiting() < BUFFER * 2 + 2:\n raise BufferError(\"Empty buffer\")\n\n raw_start = self._serial.read(2)\n start = int.from_bytes(raw_start, byteorder=\"little\", signed=True)\n\n raw_data = self._serial.read(BUFFER * 2)\n data = np.frombuffer(raw_data, dtype=np.uint16)\n data = data.reshape((BUFFER // self._n_channels, self._n_channels))\n channels = [\n np.roll(data[:, i], shift=-(start + 1) // self._n_channels) / self.factor\n for i in range(self._n_channels)\n ]\n\n return channels\n\n def simple_plot(self):\n fig, ax = 
plt.subplots(1, 1, figsize=(10, 6))\n\n curves = [\n ax.plot([], [], lw=2.0, label=f'Channel A{i}')[0]\n for i in range(self.n_channels)\n ]\n\n for i, channel in enumerate(self._data_buffer[-1]):\n curves[i].set_data(self.x[i], channel)\n\n ax.grid()\n ax.set_xlim(0, max(self.x[-1]))\n ax.set_ylim(0, self.amplitude)\n ax.set_xlabel(\"Time (s)\", fontsize=14)\n ax.set_ylabel(\"Voltage (V)\", fontsize=14)\n ax.legend(loc=1, fontsize=14)\n\n def live_plot(self, close_after: int = None):\n \"\"\" Deploy a Matplotlib window with the live state of Arduscope \"\"\"\n if not self._running.isSet():\n raise RuntimeError('First call \"start_acquire()\"')\n\n backend = plt.get_backend()\n\n if 'inline' in backend:\n print(\n f\"\\nCurrent backend of Matplotlib is {plt.get_backend()}\"\n f\"\\nLive mode not available for this backend\"\n )\n self.simple_plot()\n return\n\n def on_close(event):\n self._live_mode_on = False\n\n interactive_state = plt.isinteractive()\n\n plt.ion()\n\n self._live_mode_on = True\n fig: plt.Figure\n ax: plt.Axes\n fig, ax = plt.subplots(1, 1, figsize=(10, 6))\n fig.canvas.mpl_connect('close_event', on_close)\n curves = [\n ax.plot([], [], lw=2.0, label=f'Channel A{i}')[0]\n for i in range(self.n_channels)\n ]\n\n ax.grid()\n ax.set_xlim(0, max(self.x[-1]))\n ax.set_ylim(0, self.amplitude)\n ax.set_xlabel(\"Time (s)\", fontsize=14)\n ax.set_ylabel(\"Voltage (V)\", fontsize=14)\n ax.legend(loc=1, fontsize=14)\n\n current_screens = len(self._data_buffer)\n\n with tqdm(\n total=self._data_buffer.maxlen,\n initial=current_screens,\n ncols=80,\n bar_format=\"{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt}\"\n ) as pb:\n pb.set_description(\"Live mode on. 
Screen buffer status\")\n while self._live_mode_on is True:\n if close_after is not None:\n if len(self._data_buffer) >= close_after:\n plt.close(fig)\n self._live_mode_on = False\n fig.canvas.draw_idle()\n fig.canvas.flush_events()\n if self._screen_ready.isSet():\n for i, channel in enumerate(self._data_buffer[-1]):\n curves[i].set_data(self.x[i], channel)\n self._screen_ready.clear()\n pb.update(current_screens - pb.n)\n current_screens = len(self._data_buffer)\n if interactive_state is False:\n plt.ioff()\n\n print(\"\\n\")\n" ]
[ [ "numpy.savez", "numpy.asarray", "matplotlib.pyplot.isinteractive", "matplotlib.pyplot.get_backend", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.frombuffer", "matplotlib.pyplot.ioff", "numpy.array_split", "numpy.append", "matplotlib.pyplot.close", "numpy.savetxt", "numpy.array", "matplotlib.pyplot.ion", "numpy.roll" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thoesy2010/ANN_practice
[ "79f7d5bb1a34483a802e3052ba393245768e9ab9" ]
[ "july23rd_ANN_adagrad.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 23 09:47:13 2019\r\n\r\nusing adagrad + 2layer \r\n\r\n@author: hu\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\ndataset = pd.read_csv('Social_Network_Ads.csv')\r\n\r\nX = dataset.iloc[:,[1,2,3]].values\r\ny = dataset.iloc[:,[4]].values\r\n#X['sex_dummy']=X.Gender.map({'Female':0,'Male':1})\r\n#F=pd.get_dummies(X,columns=['Gender'],drop_first=True)\r\n\r\n#ms\r\n\r\n\r\n\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\nlabelencoder_X_1 = LabelEncoder()\r\nX[:, 0] = labelencoder_X_1.fit_transform(X[:, 0])\r\nonehotencoder = OneHotEncoder(categorical_features = [0])\r\nX = onehotencoder.fit_transform(X).toarray()\r\nX = X[:, 1:]\r\n##\r\n#\r\n#\r\n#\r\n# Splitting the dataset into the Training set and Test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\r\n\r\n## Feature Scaling\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc = StandardScaler()\r\nX_train = sc.fit_transform(X_train)\r\nX_test = sc.transform(X_test)\r\n\r\nalphas = [0.001,0.01,0.1,1,10,100,1000]\r\n\r\n# compute sigmoid nonlinearity\r\ndef sigmoid(x):\r\n output = 1/(1+np.exp(-x))\r\n return output\r\n\r\n# convert output of sigmoid function to its derivative\r\ndef sigmoid_output_to_derivative(output):\r\n return output*(1-output)\r\n \r\n#X = np.array([[0,0,1],\r\n# [0,1,1],\r\n# [1,0,1],\r\n# [1,1,1]])\r\n# \r\n#y = np.array([[0],\r\n#\t\t\t[1],\r\n#\t\t\t[1],\r\n#\t\t\t[0]])\r\n\r\nfor alpha in alphas:\r\n print (\"\\nTraining With Alpha:\" + str(alpha))\r\n np.random.seed(1)\r\n\r\n # randomly initialize our weights with mean 0\r\n synapse_0 = 2*np.random.random((3,6)) - 1\r\n synapse_1 = 2*np.random.random((6,6)) - 1\r\n synapse_2 = 2*np.random.random((6,1)) - 1\r\n\r\n for j in range(60000):\r\n\r\n # Feed forward through layers 0, 1, and 2\r\n 
layer_0 = X\r\n layer_1 = sigmoid(np.dot(layer_0,synapse_0))\r\n layer_2 = sigmoid(np.dot(layer_1,synapse_1))\r\n layer_3 = sigmoid(np.dot(layer_2,synapse_2))\r\n\r\n # how much did we miss the target value?\r\n layer_3_error = layer_3 - y\r\n\r\n if (j% 10000) == 0:\r\n print (\"Error after \"+str(j)+\" iterations:\" + str(np.mean(np.abs(layer_3_error))))\r\n\r\n # in what direction is the target value?\r\n # were we really sure? if so, don't change too much.\r\n layer_3_delta = layer_3_error*sigmoid_output_to_derivative(layer_3)\r\n \r\n layer_2_error = layer_3_delta.dot(synapse_2.T)\r\n \r\n layer_2_delta = layer_2_error*sigmoid_output_to_derivative(layer_2)\r\n\r\n # how much did each l1 value contribute to the l2 error (according to the weights)?\r\n layer_1_error = layer_2_delta.dot(synapse_1.T)\r\n\r\n # in what direction is the target l1?\r\n # were we really sure? if so, don't change too much.\r\n layer_1_delta = layer_1_error * sigmoid_output_to_derivative(layer_1)\r\n \r\n synapse_2 -= alpha * (layer_2.T.dot(layer_3_delta))\r\n synapse_1 -= alpha * (layer_1.T.dot(layer_2_delta))\r\n synapse_0 -= alpha * (layer_0.T.dot(layer_1_delta))\r\n\r\n" ]
[ [ "numpy.dot", "pandas.read_csv", "numpy.random.random", "numpy.abs", "numpy.random.seed", "sklearn.preprocessing.OneHotEncoder", "sklearn.model_selection.train_test_split", "numpy.exp", "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.LabelEncoder" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
clbarnes/numba
[ "96bf1ea40a9b2208b51b263c6ae10b55358bdb06" ]
[ "numba/tests/support.py" ]
[ "\"\"\"\nAssorted utilities for use in tests.\n\"\"\"\n\nimport cmath\nimport contextlib\nimport enum\nimport gc\nimport math\nimport platform\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport time\nimport io\nimport ctypes\nimport multiprocessing as mp\nimport warnings\nimport traceback\nfrom contextlib import contextmanager\n\nimport numpy as np\n\nfrom numba import testing\nfrom numba.core import errors, typing, utils, config, cpu\nfrom numba.core.compiler import compile_extra, compile_isolated, Flags, DEFAULT_FLAGS\nimport unittest\nfrom numba.core.runtime import rtsys\nfrom numba.np import numpy_support\n\n\ntry:\n import scipy\nexcept ImportError:\n scipy = None\n\n\nenable_pyobj_flags = Flags()\nenable_pyobj_flags.set(\"enable_pyobject\")\n\nforce_pyobj_flags = Flags()\nforce_pyobj_flags.set(\"force_pyobject\")\n\nno_pyobj_flags = Flags()\n\nnrt_flags = Flags()\nnrt_flags.set(\"nrt\")\n\n\ntag = testing.make_tag_decorator(['important', 'long_running'])\n\n_32bit = sys.maxsize <= 2 ** 32\nis_parfors_unsupported = _32bit\nskip_parfors_unsupported = unittest.skipIf(\n is_parfors_unsupported,\n 'parfors not supported',\n)\nskip_py38_or_later = unittest.skipIf(\n utils.PYVERSION >= (3, 8),\n \"unsupported on py3.8 or later\"\n)\nskip_tryexcept_unsupported = unittest.skipIf(\n utils.PYVERSION < (3, 7),\n \"try-except unsupported on py3.6 or earlier\"\n)\nskip_tryexcept_supported = unittest.skipIf(\n utils.PYVERSION >= (3, 7),\n \"try-except supported on py3.7 or later\"\n)\n\n_msg = \"SciPy needed for test\"\nskip_unless_scipy = unittest.skipIf(scipy is None, _msg)\n\n_lnx_reason = 'linux only test'\nlinux_only = unittest.skipIf(not sys.platform.startswith('linux'), _lnx_reason)\n\n_is_armv7l = platform.machine() == 'armv7l'\n\ndisabled_test = unittest.skipIf(True, 'Test disabled')\n\n# See issue #4563, PPC64LE LLVM bug\nskip_ppc64le_issue4563 = unittest.skipIf(platform.machine() == 'ppc64le',\n (\"Hits: 'Parameter area must exist 
\"\n \"to pass an argument in memory'\"))\n\n# Typeguard\nhas_typeguard = bool(os.environ.get('NUMBA_USE_TYPEGUARD', 0))\n\nskip_unless_typeguard = unittest.skipUnless(\n has_typeguard, \"Typeguard is not enabled\",\n)\n\n# See issue #6465, PPC64LE LLVM bug\nskip_ppc64le_issue6465 = unittest.skipIf(platform.machine() == 'ppc64le',\n (\"Hits: 'mismatch in size of \"\n \"parameter area' in \"\n \"LowerCall_64SVR4\"))\n\nskip_unless_py37_or_later = lambda reason: \\\n unittest.skipIf(utils.PYVERSION < (3, 7),\n reason)\n\ntry:\n import scipy.linalg.cython_lapack\n has_lapack = True\nexcept ImportError:\n has_lapack = False\n\nneeds_lapack = unittest.skipUnless(has_lapack,\n \"LAPACK needs SciPy 1.0+\")\n\ntry:\n import scipy.linalg.cython_blas\n has_blas = True\nexcept ImportError:\n has_blas = False\n\nneeds_blas = unittest.skipUnless(has_blas, \"BLAS needs SciPy 1.0+\")\n\n\nclass CompilationCache(object):\n \"\"\"\n A cache of compilation results for various signatures and flags.\n This can make tests significantly faster (or less slow).\n \"\"\"\n\n def __init__(self):\n self.typingctx = typing.Context()\n self.targetctx = cpu.CPUContext(self.typingctx)\n self.cr_cache = {}\n\n def compile(self, func, args, return_type=None, flags=DEFAULT_FLAGS):\n \"\"\"\n Compile the function or retrieve an already compiled result\n from the cache.\n \"\"\"\n from numba.core.registry import cpu_target\n\n cache_key = (func, args, return_type, flags)\n if cache_key in self.cr_cache:\n cr = self.cr_cache[cache_key]\n else:\n # Register the contexts in case for nested @jit or @overload calls\n # (same as compile_isolated())\n with cpu_target.nested_context(self.typingctx, self.targetctx):\n cr = compile_extra(self.typingctx, self.targetctx, func,\n args, return_type, flags, locals={})\n self.cr_cache[cache_key] = cr\n return cr\n\n\nclass TestCase(unittest.TestCase):\n\n longMessage = True\n\n # A random state yielding the same random numbers for any test case.\n # Use as 
`self.random.<method name>`\n @utils.cached_property\n def random(self):\n return np.random.RandomState(42)\n\n def reset_module_warnings(self, module):\n \"\"\"\n Reset the warnings registry of a module. This can be necessary\n as the warnings module is buggy in that regard.\n See http://bugs.python.org/issue4180\n \"\"\"\n if isinstance(module, str):\n module = sys.modules[module]\n try:\n del module.__warningregistry__\n except AttributeError:\n pass\n\n @contextlib.contextmanager\n def assertTypingError(self):\n \"\"\"\n A context manager that asserts the enclosed code block fails\n compiling in nopython mode.\n \"\"\"\n _accepted_errors = (errors.LoweringError, errors.TypingError,\n TypeError, NotImplementedError)\n with self.assertRaises(_accepted_errors) as cm:\n yield cm\n\n @contextlib.contextmanager\n def assertRefCount(self, *objects):\n \"\"\"\n A context manager that asserts the given objects have the\n same reference counts before and after executing the\n enclosed block.\n \"\"\"\n old_refcounts = [sys.getrefcount(x) for x in objects]\n yield\n new_refcounts = [sys.getrefcount(x) for x in objects]\n for old, new, obj in zip(old_refcounts, new_refcounts, objects):\n if old != new:\n self.fail(\"Refcount changed from %d to %d for object: %r\"\n % (old, new, obj))\n\n @contextlib.contextmanager\n def assertNoNRTLeak(self):\n \"\"\"\n A context manager that asserts no NRT leak was created during\n the execution of the enclosed block.\n \"\"\"\n old = rtsys.get_allocation_stats()\n yield\n new = rtsys.get_allocation_stats()\n total_alloc = new.alloc - old.alloc\n total_free = new.free - old.free\n total_mi_alloc = new.mi_alloc - old.mi_alloc\n total_mi_free = new.mi_free - old.mi_free\n self.assertEqual(total_alloc, total_free,\n \"number of data allocs != number of data frees\")\n self.assertEqual(total_mi_alloc, total_mi_free,\n \"number of meminfo allocs != number of meminfo frees\")\n\n\n _bool_types = (bool, np.bool_)\n _exact_typesets = 
[_bool_types, (int,), (str,), (np.integer,),\n (bytes, np.bytes_)]\n _approx_typesets = [(float,), (complex,), (np.inexact)]\n _sequence_typesets = [(tuple, list)]\n _float_types = (float, np.floating)\n _complex_types = (complex, np.complexfloating)\n\n def _detect_family(self, numeric_object):\n \"\"\"\n This function returns a string description of the type family\n that the object in question belongs to. Possible return values\n are: \"exact\", \"complex\", \"approximate\", \"sequence\", and \"unknown\"\n \"\"\"\n if isinstance(numeric_object, np.ndarray):\n return \"ndarray\"\n\n if isinstance(numeric_object, enum.Enum):\n return \"enum\"\n\n for tp in self._sequence_typesets:\n if isinstance(numeric_object, tp):\n return \"sequence\"\n\n for tp in self._exact_typesets:\n if isinstance(numeric_object, tp):\n return \"exact\"\n\n for tp in self._complex_types:\n if isinstance(numeric_object, tp):\n return \"complex\"\n\n for tp in self._approx_typesets:\n if isinstance(numeric_object, tp):\n return \"approximate\"\n\n return \"unknown\"\n\n def _fix_dtype(self, dtype):\n \"\"\"\n Fix the given *dtype* for comparison.\n \"\"\"\n # Under 64-bit Windows, Numpy may return either int32 or int64\n # arrays depending on the function.\n if (sys.platform == 'win32' and sys.maxsize > 2**32 and\n dtype == np.dtype('int32')):\n return np.dtype('int64')\n else:\n return dtype\n\n def _fix_strides(self, arr):\n \"\"\"\n Return the strides of the given array, fixed for comparison.\n Strides for 0- or 1-sized dimensions are ignored.\n \"\"\"\n if arr.size == 0:\n return [0] * arr.ndim\n else:\n return [stride / arr.itemsize\n for (stride, shape) in zip(arr.strides, arr.shape)\n if shape > 1]\n\n def assertStridesEqual(self, first, second):\n \"\"\"\n Test that two arrays have the same shape and strides.\n \"\"\"\n self.assertEqual(first.shape, second.shape, \"shapes differ\")\n self.assertEqual(first.itemsize, second.itemsize, \"itemsizes differ\")\n 
self.assertEqual(self._fix_strides(first), self._fix_strides(second),\n \"strides differ\")\n\n def assertPreciseEqual(self, first, second, prec='exact', ulps=1,\n msg=None, ignore_sign_on_zero=False,\n abs_tol=None\n ):\n \"\"\"\n Versatile equality testing function with more built-in checks than\n standard assertEqual().\n\n For arrays, test that layout, dtype, shape are identical, and\n recursively call assertPreciseEqual() on the contents.\n\n For other sequences, recursively call assertPreciseEqual() on\n the contents.\n\n For scalars, test that two scalars or have similar types and are\n equal up to a computed precision.\n If the scalars are instances of exact types or if *prec* is\n 'exact', they are compared exactly.\n If the scalars are instances of inexact types (float, complex)\n and *prec* is not 'exact', then the number of significant bits\n is computed according to the value of *prec*: 53 bits if *prec*\n is 'double', 24 bits if *prec* is single. This number of bits\n can be lowered by raising the *ulps* value.\n ignore_sign_on_zero can be set to True if zeros are to be considered\n equal regardless of their sign bit.\n abs_tol if this is set to a float value its value is used in the\n following. If, however, this is set to the string \"eps\" then machine\n precision of the type(first) is used in the following instead. This\n kwarg is used to check if the absolute difference in value between first\n and second is less than the value set, if so the numbers being compared\n are considered equal. 
(This is to handle small numbers typically of\n magnitude less than machine precision).\n\n Any value of *prec* other than 'exact', 'single' or 'double'\n will raise an error.\n \"\"\"\n try:\n self._assertPreciseEqual(first, second, prec, ulps, msg,\n ignore_sign_on_zero, abs_tol)\n except AssertionError as exc:\n failure_msg = str(exc)\n # Fall off of the 'except' scope to avoid Python 3 exception\n # chaining.\n else:\n return\n # Decorate the failure message with more information\n self.fail(\"when comparing %s and %s: %s\" % (first, second, failure_msg))\n\n def _assertPreciseEqual(self, first, second, prec='exact', ulps=1,\n msg=None, ignore_sign_on_zero=False,\n abs_tol=None):\n \"\"\"Recursive workhorse for assertPreciseEqual().\"\"\"\n\n def _assertNumberEqual(first, second, delta=None):\n if (delta is None or first == second == 0.0\n or math.isinf(first) or math.isinf(second)):\n self.assertEqual(first, second, msg=msg)\n # For signed zeros\n if not ignore_sign_on_zero:\n try:\n if math.copysign(1, first) != math.copysign(1, second):\n self.fail(\n self._formatMessage(msg,\n \"%s != %s\" %\n (first, second)))\n except TypeError:\n pass\n else:\n self.assertAlmostEqual(first, second, delta=delta, msg=msg)\n\n first_family = self._detect_family(first)\n second_family = self._detect_family(second)\n\n assertion_message = \"Type Family mismatch. 
(%s != %s)\" % (first_family,\n second_family)\n if msg:\n assertion_message += ': %s' % (msg,)\n self.assertEqual(first_family, second_family, msg=assertion_message)\n\n # We now know they are in the same comparison family\n compare_family = first_family\n\n # For recognized sequences, recurse\n if compare_family == \"ndarray\":\n dtype = self._fix_dtype(first.dtype)\n self.assertEqual(dtype, self._fix_dtype(second.dtype))\n self.assertEqual(first.ndim, second.ndim,\n \"different number of dimensions\")\n self.assertEqual(first.shape, second.shape,\n \"different shapes\")\n self.assertEqual(first.flags.writeable, second.flags.writeable,\n \"different mutability\")\n # itemsize is already checked by the dtype test above\n self.assertEqual(self._fix_strides(first),\n self._fix_strides(second), \"different strides\")\n if first.dtype != dtype:\n first = first.astype(dtype)\n if second.dtype != dtype:\n second = second.astype(dtype)\n for a, b in zip(first.flat, second.flat):\n self._assertPreciseEqual(a, b, prec, ulps, msg,\n ignore_sign_on_zero, abs_tol)\n return\n\n elif compare_family == \"sequence\":\n self.assertEqual(len(first), len(second), msg=msg)\n for a, b in zip(first, second):\n self._assertPreciseEqual(a, b, prec, ulps, msg,\n ignore_sign_on_zero, abs_tol)\n return\n\n elif compare_family == \"exact\":\n exact_comparison = True\n\n elif compare_family in [\"complex\", \"approximate\"]:\n exact_comparison = False\n\n elif compare_family == \"enum\":\n self.assertIs(first.__class__, second.__class__)\n self._assertPreciseEqual(first.value, second.value,\n prec, ulps, msg,\n ignore_sign_on_zero, abs_tol)\n return\n\n elif compare_family == \"unknown\":\n # Assume these are non-numeric types: we will fall back\n # on regular unittest comparison.\n self.assertIs(first.__class__, second.__class__)\n exact_comparison = True\n\n else:\n assert 0, \"unexpected family\"\n\n # If a Numpy scalar, check the dtype is exactly the same too\n # (required for datetime64 
and timedelta64).\n if hasattr(first, 'dtype') and hasattr(second, 'dtype'):\n self.assertEqual(first.dtype, second.dtype)\n\n # Mixing bools and non-bools should always fail\n if (isinstance(first, self._bool_types) !=\n isinstance(second, self._bool_types)):\n assertion_message = (\"Mismatching return types (%s vs. %s)\"\n % (first.__class__, second.__class__))\n if msg:\n assertion_message += ': %s' % (msg,)\n self.fail(assertion_message)\n\n try:\n if cmath.isnan(first) and cmath.isnan(second):\n # The NaNs will compare unequal, skip regular comparison\n return\n except TypeError:\n # Not floats.\n pass\n\n # if absolute comparison is set, use it\n if abs_tol is not None:\n if abs_tol == \"eps\":\n rtol = np.finfo(type(first)).eps\n elif isinstance(abs_tol, float):\n rtol = abs_tol\n else:\n raise ValueError(\"abs_tol is not \\\"eps\\\" or a float, found %s\"\n % abs_tol)\n if abs(first - second) < rtol:\n return\n\n exact_comparison = exact_comparison or prec == 'exact'\n\n if not exact_comparison and prec != 'exact':\n if prec == 'single':\n bits = 24\n elif prec == 'double':\n bits = 53\n else:\n raise ValueError(\"unsupported precision %r\" % (prec,))\n k = 2 ** (ulps - bits - 1)\n delta = k * (abs(first) + abs(second))\n else:\n delta = None\n if isinstance(first, self._complex_types):\n _assertNumberEqual(first.real, second.real, delta)\n _assertNumberEqual(first.imag, second.imag, delta)\n elif isinstance(first, (np.timedelta64, np.datetime64)):\n # Since Np 1.16 NaT == NaT is False, so special comparison needed\n if numpy_support.numpy_version >= (1, 16) and np.isnat(first):\n self.assertEqual(np.isnat(first), np.isnat(second))\n else:\n _assertNumberEqual(first, second, delta)\n else:\n _assertNumberEqual(first, second, delta)\n\n def run_nullary_func(self, pyfunc, flags):\n \"\"\"\n Compile the 0-argument *pyfunc* with the given *flags*, and check\n it returns the same result as the pure Python function.\n The got and expected results are returned.\n 
\"\"\"\n cr = compile_isolated(pyfunc, (), flags=flags)\n cfunc = cr.entry_point\n expected = pyfunc()\n got = cfunc()\n self.assertPreciseEqual(got, expected)\n return got, expected\n\n\nclass SerialMixin(object):\n \"\"\"Mixin to mark test for serial execution.\n \"\"\"\n _numba_parallel_test_ = False\n\n\n# Various helpers\n\[email protected]\ndef override_config(name, value):\n \"\"\"\n Return a context manager that temporarily sets Numba config variable\n *name* to *value*. *name* must be the name of an existing variable\n in numba.config.\n \"\"\"\n old_value = getattr(config, name)\n setattr(config, name, value)\n try:\n yield\n finally:\n setattr(config, name, old_value)\n\n\[email protected]\ndef override_env_config(name, value):\n \"\"\"\n Return a context manager that temporarily sets an Numba config environment\n *name* to *value*.\n \"\"\"\n old = os.environ.get(name)\n os.environ[name] = value\n config.reload_config()\n\n try:\n yield\n finally:\n if old is None:\n # If it wasn't set originally, delete the environ var\n del os.environ[name]\n else:\n # Otherwise, restore to the old value\n os.environ[name] = old\n # Always reload config\n config.reload_config()\n\n\ndef compile_function(name, code, globs):\n \"\"\"\n Given a *code* string, compile it with globals *globs* and return\n the function named *name*.\n \"\"\"\n co = compile(code.rstrip(), \"<string>\", \"single\")\n ns = {}\n eval(co, globs, ns)\n return ns[name]\n\ndef tweak_code(func, codestring=None, consts=None):\n \"\"\"\n Tweak the code object of the given function by replacing its\n *codestring* (a bytes object) and *consts* tuple, optionally.\n \"\"\"\n co = func.__code__\n tp = type(co)\n if codestring is None:\n codestring = co.co_code\n if consts is None:\n consts = co.co_consts\n if utils.PYVERSION >= (3, 8):\n new_code = tp(co.co_argcount, co.co_posonlyargcount,\n co.co_kwonlyargcount, co.co_nlocals,\n co.co_stacksize, co.co_flags, codestring,\n consts, co.co_names, 
co.co_varnames,\n co.co_filename, co.co_name, co.co_firstlineno,\n co.co_lnotab)\n else:\n new_code = tp(co.co_argcount, co.co_kwonlyargcount, co.co_nlocals,\n co.co_stacksize, co.co_flags, codestring,\n consts, co.co_names, co.co_varnames,\n co.co_filename, co.co_name, co.co_firstlineno,\n co.co_lnotab)\n func.__code__ = new_code\n\n\n_trashcan_dir = 'numba-tests'\n\nif os.name == 'nt':\n # Under Windows, gettempdir() points to the user-local temp dir\n _trashcan_dir = os.path.join(tempfile.gettempdir(), _trashcan_dir)\nelse:\n # Mix the UID into the directory name to allow different users to\n # run the test suite without permission errors (issue #1586)\n _trashcan_dir = os.path.join(tempfile.gettempdir(),\n \"%s.%s\" % (_trashcan_dir, os.getuid()))\n\n# Stale temporary directories are deleted after they are older than this value.\n# The test suite probably won't ever take longer than this...\n_trashcan_timeout = 24 * 3600 # 1 day\n\ndef _create_trashcan_dir():\n try:\n os.mkdir(_trashcan_dir)\n except FileExistsError:\n pass\n\ndef _purge_trashcan_dir():\n freshness_threshold = time.time() - _trashcan_timeout\n for fn in sorted(os.listdir(_trashcan_dir)):\n fn = os.path.join(_trashcan_dir, fn)\n try:\n st = os.stat(fn)\n if st.st_mtime < freshness_threshold:\n shutil.rmtree(fn, ignore_errors=True)\n except OSError as e:\n # In parallel testing, several processes can attempt to\n # remove the same entry at once, ignore.\n pass\n\ndef _create_trashcan_subdir(prefix):\n _purge_trashcan_dir()\n path = tempfile.mkdtemp(prefix=prefix + '-', dir=_trashcan_dir)\n return path\n\ndef temp_directory(prefix):\n \"\"\"\n Create a temporary directory with the given *prefix* that will survive\n at least as long as this process invocation. 
The temporary directory\n will be eventually deleted when it becomes stale enough.\n\n This is necessary because a DLL file can't be deleted while in use\n under Windows.\n\n An interesting side-effect is to be able to inspect the test files\n shortly after a test suite run.\n \"\"\"\n _create_trashcan_dir()\n return _create_trashcan_subdir(prefix)\n\n\ndef import_dynamic(modname):\n \"\"\"\n Import and return a module of the given name. Care is taken to\n avoid issues due to Python's internal directory caching.\n \"\"\"\n import importlib\n importlib.invalidate_caches()\n __import__(modname)\n return sys.modules[modname]\n\n\n# From CPython\n\[email protected]\ndef captured_output(stream_name):\n \"\"\"Return a context manager used by captured_stdout/stdin/stderr\n that temporarily replaces the sys stream *stream_name* with a StringIO.\"\"\"\n orig_stdout = getattr(sys, stream_name)\n setattr(sys, stream_name, io.StringIO())\n try:\n yield getattr(sys, stream_name)\n finally:\n setattr(sys, stream_name, orig_stdout)\n\ndef captured_stdout():\n \"\"\"Capture the output of sys.stdout:\n\n with captured_stdout() as stdout:\n print(\"hello\")\n self.assertEqual(stdout.getvalue(), \"hello\\n\")\n \"\"\"\n return captured_output(\"stdout\")\n\ndef captured_stderr():\n \"\"\"Capture the output of sys.stderr:\n\n with captured_stderr() as stderr:\n print(\"hello\", file=sys.stderr)\n self.assertEqual(stderr.getvalue(), \"hello\\n\")\n \"\"\"\n return captured_output(\"stderr\")\n\n\[email protected]\ndef capture_cache_log():\n with captured_stdout() as out:\n with override_config('DEBUG_CACHE', True):\n yield out\n\n\nclass MemoryLeak(object):\n\n __enable_leak_check = True\n\n def memory_leak_setup(self):\n # Clean up any NRT-backed objects hanging in a dead reference cycle\n gc.collect()\n self.__init_stats = rtsys.get_allocation_stats()\n\n def memory_leak_teardown(self):\n if self.__enable_leak_check:\n self.assert_no_memory_leak()\n\n def 
assert_no_memory_leak(self):\n old = self.__init_stats\n new = rtsys.get_allocation_stats()\n total_alloc = new.alloc - old.alloc\n total_free = new.free - old.free\n total_mi_alloc = new.mi_alloc - old.mi_alloc\n total_mi_free = new.mi_free - old.mi_free\n self.assertEqual(total_alloc, total_free)\n self.assertEqual(total_mi_alloc, total_mi_free)\n\n def disable_leak_check(self):\n # For per-test use when MemoryLeakMixin is injected into a TestCase\n self.__enable_leak_check = False\n\n\nclass MemoryLeakMixin(MemoryLeak):\n\n def setUp(self):\n super(MemoryLeakMixin, self).setUp()\n self.memory_leak_setup()\n\n def tearDown(self):\n super(MemoryLeakMixin, self).tearDown()\n gc.collect()\n self.memory_leak_teardown()\n\n\[email protected]\ndef forbid_codegen():\n \"\"\"\n Forbid LLVM code generation during the execution of the context\n manager's enclosed block.\n\n If code generation is invoked, a RuntimeError is raised.\n \"\"\"\n from numba.core import codegen\n patchpoints = ['CodeLibrary._finalize_final_module']\n\n old = {}\n def fail(*args, **kwargs):\n raise RuntimeError(\"codegen forbidden by test case\")\n try:\n # XXX use the mock library instead?\n for name in patchpoints:\n parts = name.split('.')\n obj = codegen\n for attrname in parts[:-1]:\n obj = getattr(obj, attrname)\n attrname = parts[-1]\n value = getattr(obj, attrname)\n assert callable(value), (\"%r should be callable\" % name)\n old[obj, attrname] = value\n setattr(obj, attrname, fail)\n yield\n finally:\n for (obj, attrname), value in old.items():\n setattr(obj, attrname, value)\n\n\n# For details about redirection of file-descriptor, read\n# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/\n\[email protected]\ndef redirect_fd(fd):\n \"\"\"\n Temporarily redirect *fd* to a pipe's write end and return a file object\n wrapping the pipe's read end.\n \"\"\"\n\n from numba import _helperlib\n libnumba = ctypes.CDLL(_helperlib.__file__)\n\n 
libnumba._numba_flush_stdout()\n save = os.dup(fd)\n r, w = os.pipe()\n try:\n os.dup2(w, fd)\n yield io.open(r, \"r\")\n finally:\n libnumba._numba_flush_stdout()\n os.close(w)\n os.dup2(save, fd)\n os.close(save)\n\n\ndef redirect_c_stdout():\n \"\"\"Redirect C stdout\n \"\"\"\n fd = sys.__stdout__.fileno()\n return redirect_fd(fd)\n\n\ndef run_in_new_process_caching(func, cache_dir_prefix=__name__, verbose=True):\n \"\"\"Spawn a new process to run `func` with a temporary cache directory.\n\n The childprocess's stdout and stderr will be captured and redirected to\n the current process's stdout and stderr.\n\n Returns\n -------\n ret : dict\n exitcode: 0 for success. 1 for exception-raised.\n stdout: str\n stderr: str\n \"\"\"\n cache_dir = temp_directory(cache_dir_prefix)\n return run_in_new_process_in_cache_dir(func, cache_dir, verbose=verbose)\n\n\ndef run_in_new_process_in_cache_dir(func, cache_dir, verbose=True):\n \"\"\"Spawn a new process to run `func` with a temporary cache directory.\n\n The childprocess's stdout and stderr will be captured and redirected to\n the current process's stdout and stderr.\n\n Similar to ``run_in_new_process_caching()`` but the ``cache_dir`` is a\n directory path instead of a name prefix for the directory path.\n\n Returns\n -------\n ret : dict\n exitcode: 0 for success. 
1 for exception-raised.\n stdout: str\n stderr: str\n \"\"\"\n ctx = mp.get_context('spawn')\n qout = ctx.Queue()\n with override_env_config('NUMBA_CACHE_DIR', cache_dir):\n proc = ctx.Process(target=_remote_runner, args=[func, qout])\n proc.start()\n proc.join()\n stdout = qout.get_nowait()\n stderr = qout.get_nowait()\n if verbose and stdout.strip():\n print()\n print('STDOUT'.center(80, '-'))\n print(stdout)\n if verbose and stderr.strip():\n print(file=sys.stderr)\n print('STDERR'.center(80, '-'), file=sys.stderr)\n print(stderr, file=sys.stderr)\n return {\n 'exitcode': proc.exitcode,\n 'stdout': stdout,\n 'stderr': stderr,\n }\n\n\ndef _remote_runner(fn, qout):\n \"\"\"Used by `run_in_new_process_caching()`\n \"\"\"\n with captured_stderr() as stderr:\n with captured_stdout() as stdout:\n try:\n fn()\n except Exception:\n traceback.print_exc()\n exitcode = 1\n else:\n exitcode = 0\n qout.put(stdout.getvalue())\n qout.put(stderr.getvalue())\n sys.exit(exitcode)\n\nclass CheckWarningsMixin(object):\n @contextlib.contextmanager\n def check_warnings(self, messages, category=RuntimeWarning):\n with warnings.catch_warnings(record=True) as catch:\n warnings.simplefilter(\"always\")\n yield\n found = 0\n for w in catch:\n for m in messages:\n if m in str(w.message):\n self.assertEqual(w.category, category)\n found += 1\n self.assertEqual(found, len(messages))\n" ]
[ [ "numpy.random.RandomState", "numpy.dtype", "numpy.isnat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NekoKedama/MachineLearning-Sklearn
[ "1ff8c20815c06ae3ec2dd0ab6eb9d027323b74e3" ]
[ "Decision Tree/Decision Tree.py" ]
[ "import os\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import f1_score\n\ndata_dir = \"DT_csv\"\nx_train = []\ny_train = []\n\n\ndef get_F1(x_test, y_test, DP):\n dtc = DecisionTreeClassifier(max_depth=DP)\n dtc.fit(x_train, y_train)\n y_pred = dtc.predict(x_test)\n f1 = f1_score(y_test, y_pred, average='micro')\n return f1\n\n\nfor filename in os.listdir(data_dir)[::-1]:\n print(filename)\n\n with open(os.path.join(data_dir, filename)) as file:\n\n if \"train\" in filename:\n X = []\n y = []\n title = file.readline()\n cnt = 0\n while True:\n tmp = file.readline()\n if not tmp:\n break\n\n tmp = tmp.split(',')\n tmp = list(map(int, tmp))\n x_train.append(tmp[0:len(tmp) - 1])\n y_train.append(tmp[len(tmp) - 1])\n\n cnt = cnt + 1\n\n else:\n title = file.readline()\n tot = 0\n ac = 0\n x_test = []\n y_test = []\n while True:\n tmp = file.readline()\n if not tmp:\n break\n tot = tot + 1\n tmp = tmp.split(',')\n tmp = list(map(int, tmp))\n x_test_tmp = tmp[0:len(tmp) - 1]\n y_test_tmp = tmp[len(tmp) - 1]\n\n x_test.append(x_test_tmp)\n y_test.append(y_test_tmp)\n\n x_test = np.array(x_test)\n y_test = np.array(y_test)\n\n aix = []\n aiy = []\n for i in range(1, 20):\n aix.append(i)\n F1 = get_F1(x_test, y_test, i)\n aiy.append(F1)\n print(i, '--> f1 =', F1)\n\n plt.figure(1)\n plt.plot(aix, aiy)\n plt.show()\n\n x_train.clear()\n y_train.clear()\n\n pass\n" ]
[ [ "matplotlib.pyplot.plot", "sklearn.tree.DecisionTreeClassifier", "sklearn.metrics.f1_score", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Extreme-classification/ECLARE
[ "ca9f52842f2b5f45278eac50cd48c8b67bdfb4c5" ]
[ "ECLARE/models/transform_layer.py" ]
[ "from models.custom_embeddings import CustomEmbedding\nfrom libs.utils import resolve_schema_args, fetch_json\nfrom torch.nn.parameter import Parameter\nimport torch.nn as nn\nimport numpy as np\nimport torch\n\n\nclass scaled_spectral(nn.Module):\n def __init__(self, hidd_dims, k=1):\n super(scaled_spectral, self).__init__()\n self.layer = nn.utils.spectral_norm(nn.Linear(hidd_dims, hidd_dims))\n self.k = Parameter(torch.Tensor(1, 1))\n self.k.data.fill_(k)\n self.init()\n\n def forward(self, input):\n return self.k*self.layer(input)\n\n def extra_repr(self):\n return \"spectral_radius = {}\".format(self.k.data.numpy().shape)\n\n @property\n def stats(self):\n return \"%f\" % (self.k.detach().cpu().numpy()[0, 0])\n\n def init(self):\n nn.init.eye_(self.layer.weight)\n nn.init.constant_(self.layer.bias, 0.0)\n\n\nclass spectral_attention(nn.Module):\n def __init__(self, input_dims, degree, norm=\"softmax\"):\n super(spectral_attention, self).__init__()\n self.drop = nn.Dropout(p=0.2)\n self.trans = nn.utils.spectral_norm(nn.Linear(input_dims, input_dims, True))\n self.layer = nn.utils.spectral_norm(nn.Linear(input_dims*degree, degree, False))\n self.nl = nn.ReLU()\n self.softmax = nn.Softmax(dim=1)\n self.init()\n\n def attented(self, input, debug=False):\n _input = self.trans(self.nl(self.drop(input)))\n logits = self.layer(_input.flatten(start_dim=1))\n attn_wts = self.softmax(logits.squeeze())\n # if debug:\n # wts = attn_wts.detach().unsqueeze(1)\n # v = wts[:, :,:2].bmm(input[:,:2,:]).squeeze()\n # u = wts.bmm(input).squeeze() - v\n # print(v.shape, u.shape)\n # v = torch.norm(v.detach(), dim=1).squeeze().numpy()\n # u = torch.norm(u.detach(), dim=1).squeeze().numpy()\n # weights = np.vstack([v, u]).T\n # np.save(\"norm.npy\", weights)\n # print(np.mean(weights, axis=0))\n # print(attn_wts.mean(axis=0).detach().cpu().numpy())\n return attn_wts\n\n def forward(self, input, debug=False):\n return self.attented(input, 
debug).unsqueeze(1).bmm(input).squeeze()\n\n def init(self):\n nn.init.xavier_uniform_(self.layer.weight)\n nn.init.xavier_uniform_(self.trans.weight)\n self.trans.bias.data.fill_(0)\n\n\nclass coff(nn.Module):\n def __init__(self, input_dims, fill_val=1, nl=None):\n super(coff, self).__init__()\n self.k = Parameter(torch.Tensor(1, input_dims))\n self.k.data.fill_(fill_val)\n self.nl = nn.Identity()\n if nl == \"sigmoid\":\n self.nl = nn.Sigmoid()\n elif nl == \"tanh\":\n self.nl = nn.Tanh()\n\n def forward(self, input):\n return self.nl(self.k)*input\n\n def extra_repr(self):\n return \"coff = {}\".format(self.nl(self.k).data.numpy().shape)\n\n @property\n def stats(self):\n return \"%0.2f\" % (self.nl(self.k).detach().mean().cpu().numpy())\n\n\nclass Rpp(nn.Module):\n def __init__(self, input_size, output_size, dropout, nonLin=\"r\", k=1):\n super(Rpp, self).__init__()\n self.name = \"SR\"\n self.input_size = input_size\n self.output_size = output_size\n self.dropout = dropout\n self.nonLin = nonLin\n self.padding_size = self.output_size - self.input_size\n elements = []\n if self.nonLin == 'r':\n elements.append(nn.ReLU())\n elif self.nonLin == 'lr':\n elements.append(nn.LeakyReLU())\n else:\n pass\n if dropout > 0.0:\n elements.append(nn.Dropout(p=dropout))\n self.nonLin = nn.Sequential(*elements)\n self.scaling = scaled_spectral(self.input_size, k)\n\n def forward(self, embed):\n return self.scaling(self.nonLin(embed)) + embed\n\n @property\n def stats(self):\n name = self.name\n s = \"{}(K={})\".format(name, self.scaling.stats)\n return s\n\n\nclass CRpp(Rpp):\n def __init__(self, input_size, output_size, dropout, nonLin=\"r\",\n k=1, non_linearity=\"sigmoid\", fill_val=0):\n super(CRpp, self).__init__(input_size, output_size,\n dropout, nonLin, k)\n self.a = coff(input_size, fill_val, non_linearity)\n\n def forward(self, embed):\n return self.a(super(CRpp, self).forward(embed))\n\n @property\n def stats(self):\n name = self.name\n s = \"{}(A={}, 
K={})\".format(\n name, self.a.stats, self.scaling.stats)\n return s\n\n\nelements = {\n 'relu': nn.ReLU,\n 'R': Rpp,\n 'cR': CRpp,\n 'BoW': CustomEmbedding,\n \"light\": coff,\n \"dropout\": nn.Dropout\n}\n\n\nclass Transform(nn.Module):\n \"\"\"\n Transform document representation!\n transform_string: string for sequential pipeline\n eg relu#,dropout#p:0.1,residual#input_size:300-output_size:300\n params: dictionary like object for default params\n eg {emb_size:300}\n \"\"\"\n\n def __init__(self, modules, device=\"cuda:0\"):\n super(Transform, self).__init__()\n self.device = device\n self.transform = nn.Sequential(*modules)\n\n def forward(self, embed):\n \"\"\"\n Forward pass for transform layer\n Args:\n embed: torch.Tensor: document representation\n Returns:\n embed: torch.Tensor: transformed document representation\n \"\"\"\n return self.transform(embed)\n\n def to(self):\n super().to(self.device)\n\n def __getattr__(self, attr):\n try:\n return super().__getattr__(attr)\n except AttributeError:\n for module in super().__getattr__('transform'):\n try:\n return module.__getattribute__(attr)\n except AttributeError:\n return module.__getattr__(attr)\n raise AttributeError(\"{} not found\".format(attr))\n\ndef get_functions(obj, params=None):\n return list(map(lambda x: elements[x](**obj[x]), obj['order']))\n" ]
[ [ "torch.nn.Softmax", "torch.nn.Dropout", "torch.nn.Sequential", "torch.Tensor", "torch.nn.init.eye_", "torch.nn.init.constant_", "torch.nn.Sigmoid", "torch.nn.Tanh", "torch.nn.Linear", "torch.nn.Identity", "torch.nn.LeakyReLU", "torch.nn.init.xavier_uniform_", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Satyajeet-code/the-code-land
[ "910210eadf4cfacbc5fe6be91039253e5656c066" ]
[ "python/scikit learn-pandas- K Nearest Neighbour(KNN) Algorithm/scikit learn-pandas- K Nearest Neighbour(KNN) Algorithm.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#importing the required libraries\nimport pandas as pd\nfrom sklearn import preprocessing \nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import neighbors\nknn=neighbors.KNeighborsClassifier(n_neighbors=3)\nle=preprocessing.LabelEncoder()\n\n#loading the dataset\ndataset=pd.read_csv(\"C:/Users/ASUS/Downloads/train.csv\")\n\n#getting some information about the data\ndataset.info()\ndataset.describe()\ndataset.isnull().sum()\n\n#Dropping the columns that are not needed\ndataset=dataset.drop('Cabin', axis=1)\ndataset=dataset.drop('Name', axis=1)\ndataset=dataset.drop('PassengerId', axis=1)\ndataset=dataset.drop('Ticket', axis=1)\n\n#defining a function that will fetch the dependent variable from the user.\ndef knn(value):\n \n#Label encoding the variables with object type\n dataset['Sex']=le.fit_transform(dataset['Sex'])\n dataset['Embarked']=le.fit_transform(dataset['Embarked'])\n \n#Defining the Dependent and independent variables\n X=dataset.drop([value],axis=1)\n y=dataset[value] \n\n#Splitting the dataset in traing set and testing set where the training set is 70% of the whole dataset \n#and testing set is 30% of the whole dataset \n X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0)\n \n#fitiing the model\n knn.fit(X_train,y_train).score(X_test,y_test)\n #print(accuracy_score(y_test,y_pred,normalize=True))\n\n#Preedicting on the unseen data (test set)\n y_pred =knn.predict(X_test)\n \n#Printing the accuracy score and confusion matrix\n print(\"The accuracy score is:\")\n print(accuracy_score(y_test,y_pred,normalize=True))\n print(\"--------------------------------------\")\n print(\"The confusion matrix is:\")\n print(confusion_matrix(y_test,y_pred))\n \n#calling ht e function and passing the independent variable\nknn('Sex')\n\n\"\"\"\nNote: Some parts like EDA is 
not fully shown in this example as this example focuses on getting the \nessence of the KNN algorithm.\n\n\"\"\"\n\n" ]
[ [ "pandas.read_csv", "sklearn.model_selection.train_test_split", "sklearn.metrics.confusion_matrix", "sklearn.neighbors.KNeighborsClassifier", "sklearn.preprocessing.LabelEncoder", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
aws-samples/real-time-churn-prediction-with-amazon-connect-and-amazon-sagemaker
[ "967cd117d87b12b8efed2a314948a6cabc2554e6" ]
[ "scripts/create_dataset.py" ]
[ "import argparse\nimport pathlib\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n# Parse argument variables passed via the CreateDataset processing step\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--athena-data\", type=str)\nargs = parser.parse_args()\n\n\ndataset = pd.read_parquet(args.athena_data, engine=\"pyarrow\")\n\ntrain_df, val_df = train_test_split(dataset, test_size=0.2, random_state=42)\nval_df, test_df = train_test_split(val_df, test_size=0.05, random_state=42)\n\n# Write train, validation splits to output path\ntrain_output_path = pathlib.Path(\"/opt/ml/processing/output/train\")\nval_output_path = pathlib.Path(\"/opt/ml/processing/output/validation\")\ntest_output_path = pathlib.Path(\"/opt/ml/processing/output/test\")\nbaseline_output_path = pathlib.Path(\"/opt/ml/processing/output/baseline\")\n\ntrain_df.to_csv(train_output_path / \"train.csv\", index=False)\nval_df.to_csv(val_output_path / \"validation.csv\", index=False, header=False)\ntest_df.to_csv(test_output_path / \"test.csv\", index=False, header=False)\n\nbaseline = list(train_df.agg(\n {\n 'account_length': 'mean', \n 'vmail_message': 'mean', \n 'day_mins': 'mean',\n 'day_calls': 'mean', \n 'eve_mins': 'mean', \n 'eve_calls': 'mean', \n 'night_mins': 'mean', \n 'night_calls': 'mean',\n 'intl_mins': 'mean', \n 'intl_calls': 'mean', \n 'custserv_calls': 'mean', \n 'sentiment': 'mode',\n 'pastsenti_nut': 'mean', \n 'pastsenti_pos': 'mean', \n 'pastsenti_neg': 'mean', \n 'mth_remain': 'mean',\n 'int_l_plan_no': 'mean', \n 'int_l_plan_yes': 'mean', \n 'vmail_plan_no': 'mode', \n 'vmail_plan_yes': 'mode',\n }\n).iloc[0])\n\npd.DataFrame(baseline).to_csv(baseline_output_path / 'baseline.csv', index=False, header=None)" ]
[ [ "pandas.read_parquet", "sklearn.model_selection.train_test_split", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "1.1", "1.5", "0.24", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
erigler-usgs/bezpy
[ "9f9e5727cb80496802424292562ac3ec447e5b34" ]
[ "bezpy/mt/utils.py" ]
[ "\"\"\"Helper functions for magnetotelluric data.\"\"\"\n\n__all__ = [\"apparent_resistivity\"]\n\nimport numpy as np\n\n\ndef apparent_resistivity(periods, Z, Z_var=None):\n \"\"\"Calculates the apparent resistivity for the given periods and Z.\"\"\"\n if Z_var is None:\n Z_var = np.ones((Z.shape)) + np.ones((Z.shape))*1j\n\n # Ignore warnings because of nan possibilities. Just push those through\n with np.errstate(divide='ignore', invalid='ignore'):\n Z_std = np.sqrt(Z_var/2.) # pylint: disable=invalid-name\n resistivity = periods/5. * np.abs(Z)**2\n resistivity_std = 2*Z_std*periods/5. * np.abs(Z)\n\n phase = np.rad2deg(np.arctan(np.tan(np.angle(Z))))\n # Y components adjusted by pi\n # phase[2:,:] = np.rad2deg(np.arctan(np.tan(np.angle(Z[2:,:]) + np.pi)))\n phase_std = np.rad2deg(np.abs(Z_std / Z))\n\n return (resistivity, resistivity_std, phase, phase_std)\n" ]
[ [ "numpy.abs", "numpy.sqrt", "numpy.ones", "numpy.errstate", "numpy.angle" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sytk/SidebySide
[ "891e311391b59dd0900908b68a89df00a1cda44b" ]
[ "ML/learning.py" ]
[ "import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\n\nX_train = np.load('./X_train.npy')\nY_train = np.load('./Y_train.npy')\nX_test = np.load('./X_test.npy')\nY_test = np.load('./Y_test.npy')\nprint(X_train.shape)\nprint(X_train.min())\n\nmodel = keras.Sequential([\n # keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Input(42),\n keras.layers.Dense(128, activation='relu'),\n # keras.layers.Dense(64, activation='relu'),\n keras.layers.Dense(10, activation='softmax')\n])\n#\n# inputs = keras.layers.Input(shape=(42))\n# x = keras.layers.Dense(128, activation='relu')(inputs)\n# outputs = keras.layers.Dense(10, activation='softmax')(x)\n\n# inputs = keras.layers.Dense(128, activation='relu', input_dim=42)\n# outputs = keras.layers.Dense(10, activation='softmax')(inputs)\n\n\n# model = keras.models.Model(inputs=inputs, outputs=outputs)\n\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(X_train,\n Y_train,\n epochs=500,\n batch_size=128,\n validation_data=(X_test, Y_test),\n verbose=2)\n# test_loss, test_acc = model.evaluate(X_test, Y_test, verbose=2)\n# print('\\nTest accuracy:', test_acc)\nmodel.save('./model.h5')\n# keras.models.save_model(model, )\n\n# import tensorflowjs as tfjs\n# tfjs.converters.save_keras_model(model, 'tfjs', quantization_dtype=np.uint8)\n" ]
[ [ "numpy.load", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
jorgessanchez7/Global_Forecast_Validation
[ "d3178acaa2a67801e832554a3f871b36c266fe3a" ]
[ "Evaluate_Monthly_Seasonality_Blue_Nile_Corrected.py" ]
[ "import pandas as pd\nfrom os import path\nimport hydrostats.data as hd\nimport hydrostats.visual as hv\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/Africa/Blue_Nile/Blue_Nile_Stations_v2.csv')\n\nCOMIDs = df['COMID'].tolist()\nNames = df['Station'].tolist()\nRivers = df['Stream'].tolist()\n\nERA5_Files = []\nERAI_Files = []\n\nfor comid, name in zip(COMIDs, Names):\n\tERA5_Files.append(\n\t\t'/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/Africa/Blue_Nile/Data/Historical/simulated_data/ERA_5/Monthly_Corrected/'\n\t\t+ str(comid) + '_' + str(name) + '.csv')\n\tERAI_Files.append(\n\t\t'/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/Africa/Blue_Nile/Data/Historical/simulated_data/ERA_Interim/Monthly_Corrected/'\n\t\t+ str(comid) + '_' + str(name) + '.csv')\n\nfor comid, name, rio, ERA5_File, ERAI_File in zip(COMIDs, Names, Rivers, ERA5_Files, ERAI_Files):\n\tprint(comid, name, rio)\n\n\t#Merging the Data\n\tmerged_df = hd.merge_data(ERAI_File, ERA5_File)\n\n\tmonthly_avg = hd.monthly_average(merged_df)\n\tmonthly_std_error = hd.monthly_std_error(merged_data=merged_df)\n\n\tERA5_monthly_avg = monthly_avg[['Observed']]\n\tERA_Interim_monthly_avg = monthly_avg[['Simulated']]\n\n\tERA5_monthly_std_error = monthly_std_error[['Observed']]\n\tERA_Interim_monthly_std_error = monthly_std_error[['Simulated']]\n\n\tobserved_monthly = pd.read_csv('/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/Africa/Blue_Nile/Data/Historical/observed_data/Multiannual_Mean_Streamflow/{0}_{1}.csv'.format(comid, name), dtype={'Month': str})\n\tobserved_monthly.set_index('Month', inplace=True)\n\n\tobserved_monthly_avg = observed_monthly[['Mean Streamflow (m3/s)']]\n\tobserved_monthly_std_error = observed_monthly[['Standard Error']]\n\n\tmonthly_avg_obs_ERA5 = ERA5_monthly_avg.join(observed_monthly_avg)\n\tmonthly_std_error_ERA5 = 
ERA5_monthly_std_error.join(observed_monthly_std_error)\n\n\tmonthly_avg_obs_ERA_Interim = ERA_Interim_monthly_avg.join(observed_monthly_avg)\n\tmonthly_std_error_ERA_Interim = ERA_Interim_monthly_std_error.join(observed_monthly_std_error)\n\n\thv.plot(merged_data_df=monthly_avg_obs_ERA5, legend=('ERA-5', 'Observed'), grid=True, x_season=True,\n\t # title='Monthly Average Streamflow (Standard Error) for ' + str(\n\t # id) + ' - ' + name + '\\n River: ' + rio + '. COMID: ' + str(comid),\n\t title='Monthly Average Streamflow (Standard Error) for ' + name + '\\n River: '\n\t + rio + '. COMID: ' + str(comid),\n\t labels=['Datetime', 'Streamflow (m$^3$/s)'], linestyles=['b-', 'r-'], fig_size=(15, 9),\n\t ebars=monthly_std_error_ERA5, ecolor=('b', 'r'), tight_xlim=False)\n\t# plt.savefig(path.join(monthly_average_out_dir, '{0}_{1}_monthly_average.png'.format(str(id), name)))\n\tplt.savefig(path.join(\n\t\t'/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/Africa/Blue_Nile/Data/Historical/observed_data/Multiannual_Mean_Streamflow',\n\t\t'{0}_{1}_monthly_average_ERA5_Corrected.png'.format(str(comid), name)))\n\n\thv.plot(merged_data_df=monthly_avg_obs_ERA_Interim, legend=('ERA-5', 'Observed'), grid=True, x_season=True,\n\t # title='Monthly Average Streamflow (Standard Error) for ' + str(\n\t # id) + ' - ' + name + '\\n River: ' + rio + '. COMID: ' + str(comid),\n\t title='Monthly Average Streamflow (Standard Error) for ' + name + '\\n River: '\n\t + rio + '. 
COMID: ' + str(comid),\n\t labels=['Datetime', 'Streamflow (m$^3$/s)'], linestyles=['b-', 'r-'], fig_size=(15, 9),\n\t ebars=monthly_std_error_ERA_Interim, ecolor=('b', 'r'), tight_xlim=False)\n\t# plt.savefig(path.join(monthly_average_out_dir, '{0}_{1}_monthly_average.png'.format(str(id), name)))\n\tplt.savefig(path.join(\n\t\t'/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/Africa/Blue_Nile/Data/Historical/observed_data/Multiannual_Mean_Streamflow',\n\t\t'{0}_{1}_monthly_average_ERA-Interim_Corrected.png'.format(str(comid), name)))\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Sanzeed/balanced_influence_maximization
[ "0797b8a8f536cac8023e128ab13eb532f902bcad" ]
[ "models/net_gen/homophilic_net_gen.py" ]
[ "import numpy as np\nimport networkx as nx\n\nfrom base import NetGen\n\n\nclass HomophilicNetGen(NetGen):\n def __init__(self, n, p_M, h, alpha, beta, gamma, delta_in, delta_out):\n super(NetGen, self).__init__(n, p_M, alpha, beta, gamma, delta_in, delta_out)\n self.h = h\n \n def __get_homophily_factor(self, label_1, label_2):\n homophily_factor = self.h if (label_1 == label_2) else (1 - self.h)\n return homophily_factor\n \n def __generate_homophilic_in_probabilities(self, graph, label):\n homophily_factors = np.array([self.__get_homophily_factor(label, \n graph.node[node]['label']) \n for node in range(len(graph))])\n in_degrees = np.array([graph.in_degree(node) \n for node in range(len(graph))])\n \n return ((np.multiply(homophily_factors, in_degrees) + self.delta_in) \n / np.sum(np.multiply(homophily_factors, in_degrees) + self.delta_in))\n \n def __generate_out_probabilities(self, graph):\n out_degrees = np.array([graph.out_degree(node) \n for node in range(len(graph))])\n \n return (out_degrees + self.delta_out) / np.sum(out_degrees + self.delta_out)\n \n def __generate_homophilic_out_probabilities(self, graph, label):\n homophily_factors = np.array([self.__get_homophily_factor(label, \n graph.node[node]['label']) \n for node in range(len(graph))])\n out_degrees = np.array([graph.out_degree(node) \n for node in range(len(graph))])\n \n return ((np.multiply(homophily_factors, out_degrees) + self.delta_out) \n / np.sum(np.multiply(homophily_factors, out_degrees) + self.delta_out))\n \n def __choose_node_by_homophilic_in_degree(self, graph, label):\n nodes = np.array([node for node in range(len(graph))])\n probabilities = self.generate_homophilic_in_probabilities(graph, label)\n return np.random.choice(nodes, 1, False, probabilities)[0]\n \n def __choose_node_by_out_degree(self, graph):\n nodes = np.array([node for node in range(len(graph))])\n probabilities = self.generate_out_probabilities(graph)\n return np.random.choice(nodes, 1, False, 
probabilities)[0]\n \n def __choose_node_by_homophilic_out_degree(self, graph, label):\n nodes = np.array([node for node in range(len(graph))])\n probabilities = self.generate_homophilic_out_probabilities(graph, label)\n return np.random.choice(nodes, 1, False, probabilities)[0] \n \n def add_new_node_with_out_edge(self, graph):\n v = len(graph)\n label = np.random.choice(['a', 'b'], 1, False, [self.p_M, 1 - self.p_M])[0]\n \n if len(graph) == 1:\n graph.add_edges_from([(v, 0)])\n return\n \n w = self.__choose_node_by_homophilic_in_degree(graph, label)\n graph.add_edges_from([(v, w)])\n \n nx.set_node_attributes(graph, {v : {'label' : label}})\n \n def add_new_edge_between_old_nodes(self, graph):\n if len(graph) == 1:\n return\n \n v = self.__choose_node_by_out_degree(graph)\n w = self.__choose_node_by_homophilic_in_degree(graph, graph.node[v]['label'])\n graph.add_edges_from([(v, w)])\n \n def add_new_node_with_in_edge(self, graph):\n w = len(graph)\n label = np.random.choice(['a', 'b'], 1, False, [self.p_M, 1 - self.p_M])[0]\n \n if len(graph) == 1:\n graph.add_edges_from([(0, w)])\n return\n \n v = self.__choose_node_by_homophilic_out_degree(graph, label)\n graph.add_edges_from([(v, w)])\n \n nx.set_node_attributes(graph, {w : {'label' : label}}) " ]
[ [ "numpy.sum", "numpy.multiply", "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dominicamartinez/clustehr
[ "0ce893a666974674fad36591f0156bd720910b4d" ]
[ "src/MASPC.py" ]
[ "import numpy as np\nimport pandas as pd\nimport scipy\nfrom sklearn import metrics\nfrom FPMax import FPMax\nfrom Apriori import Apriori\nfrom scipy.cluster.hierarchy import fcluster\nfrom scipy.cluster.hierarchy import linkage\n\n# MASPC algorithm\nclass MASPC():\n def __init__(self,demographic,dignosisCodes):\n self.demographic = demographic\n self.dignosisCodes = dignosisCodes\n\n def MAS(self,minSup,minAc,minOv):\n # Run FPMax to get MFI \n fpmax = FPMax()\n fpmax.encode_input([])\n fpmax.run(minSup)\n \n # Running Apriori is a preparatory step for getting MFA\n apriori = Apriori()\n apriori.encode_input([])\n apriori.run(minSup)\n \n # This assumes input to be the output of the spmf.jar file\n list_1 = []\n for i in apriori.decode_output():\n if len(i)==3:\n list_1.append(i)\n\n # Get MFA\n all_con=self.get_all_allconfidence(list_1,fpmax.decode_output(),minAc)\n all_con.sort(key=lambda x: x[-1],reverse=True)\n\n all_con_withoutSUP=[]\n for i in all_con:\n all_con_withoutSUP.append([x for x in i[:len(i)-2]])\n \n all_con_target = []\n for i in all_con_withoutSUP: \n flag = 0\n for j in all_con_target:\n if (set(i) & set(j) != set()):\n number = 0\n for k in self.dignosisCodes: \n if ( ( set(k) & (set(i)|set(j)) ) == (set(i)|set(j)) ): \n number = number + 1 \n if number <= minOv: \n flag = 1\n break\n if flag == 0: \n all_con_target.append(i)\n \n all_con_target_without1=[]\n\n for i in all_con_target:\n if len(i) != 1:\n all_con_target_without1.append(i)\n\n # save MFAs\n self.MFAs = all_con_target_without1\n\n # Input a list of MFIs\n # Return MFIs whose All_confidence is above minAc\n def get_all_allconfidence(self,list_1,list_all_max,threshhold):\n all_max=[]\n for i in list_all_max:\n temp_allconfidence = self.allconfidence(list_1,i)\n if temp_allconfidence >= threshhold:\n i[-1] = temp_allconfidence\n all_max.append(i)\n return all_max\n\n def allconfidence(self,list_1,list_max):\n # Compute All_confidence of an itemset\n b=[]\n for i in 
list_max[:len(list_max)-2]:\n for j in list_1:\n if i==j[0]:\n b.append(int(j[2]))\n return int(list_max[-1])/max(b)\n\n def PC(self,k,method,metric):\n w, h = len(self.MFAs), len(self.dignosisCodes);\n all_con_tables_without1=[[0 for x in range(w)] for y in range(h)] \n\n # project maximum set of independent frequnet patterns \n for i,j in enumerate(self.dignosisCodes):\n temp=set(j)\n \n l=len(temp)\n for a,b in enumerate(self.MFAs): \n while(set(b)<=temp):\n temp=temp.difference(set(b))\n \n all_con_tables_without1[i][a]+=1 \n\n # build a dataframe\n all_con_part_2_without1=pd.DataFrame(all_con_tables_without1, columns=[str(sublist) for sublist in self.MFAs]) \n all_con_final_t_without1=self.demographic.join(all_con_part_2_without1)\n # delete the data that not be subscribed\n all_con_delete_without1= [sum(i) for i in all_con_tables_without1]\n all_con_delete_idex_without1=[i for i, e in enumerate(all_con_delete_without1) if e == 0]\n all_con_final_t_without1.drop(all_con_delete_idex_without1,inplace=True)\n self.binaryData=all_con_final_t_without1\n # do clustering\n all_con_cos_ave_without1 = linkage(all_con_final_t_without1.values, method, metric)\n self.ClusterResult=fcluster(all_con_cos_ave_without1, k, criterion='maxclust')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "scipy.cluster.hierarchy.linkage", "scipy.cluster.hierarchy.fcluster" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
kant/Multilingual-RDF-Verbalizer
[ "227219883d88d67fefd3aad8df54e2b49f165d6d" ]
[ "hierarchical-decoding/layers/PositionalEncoding.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport math\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, hid_dim, device, max_length=100):\n super().__init__()\n\n # Compute the positional encodings once in log space.\n self.pe = torch.zeros(max_length, hid_dim)\n self.position = torch.arange(0, max_length).unsqueeze(1)\n\n self.div_term = torch.exp(torch.arange(0, hid_dim, 2) *\n -(math.log(10000.0) / hid_dim))\n self.pe[:, 0::2] = torch.sin(self.position * self.div_term)\n self.pe[:, 1::2] = torch.cos(self.position * self.div_term)\n\n self.pe = self.pe.unsqueeze(0).to(device)\n\n #self.register_buffer('pe', pe)\n\n def forward(self, x):\n\n x = x + Variable(self.pe[:, :x.size(1)],\n requires_grad=False)\n return x\n" ]
[ [ "torch.sin", "torch.arange", "torch.cos", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
eugval/RoboPlay
[ "90a2bdb5077b4c9eed6f0fd63ae33c5cb283eab9" ]
[ "armRobot/RobotArm.py" ]
[ "import numpy as np\nimport collections\n\n\nclass RobotArm(object):\n def __init__(self, initial_effector_positions, dynamics_params_no=-1, dynamics_reset_min=0.3,\n dynamics_reset_range=0.4):\n '''\n Class for a 2D robot arm, with determined dynamics and an arbitrary number of joints.\n :param initial_effector_positions: [list of np arrays of size 2] The list of the effector coordinates of the arm,\n define the configuration of the arm.\n :param dynamics_params_no: [int] Number of dynamics parameters accepted by the arm. If negative, the arm has no\n dynamics.\n :param dynamics_reset_min: [float between 0 and 1] Upon reset, this is the minimum value the dynamics parameters\n of the arm can take\n :param dynamics_reset_range: [float between 0 and 1] The maximum value for the dynamics parameters of the arm\n are dynamics_reset_min + dynamics_reset_range.\n '''\n # The full state of the arm is encoded in the positions of the effectors\n self.__effector_positions = initial_effector_positions\n\n self.check_consistency()\n\n\n # Dynamics reset parameters\n self.dynamics_params_no = dynamics_params_no\n self.dynamics_reset_min = dynamics_reset_min\n self.dynamics_reset_range = dynamics_reset_range\n\n # Add dynamics\n if (dynamics_params_no == -1):\n self.dynamics_params_no = len(initial_effector_positions)\n self.dynamics_reset_min = 0.5\n self.dynamics_reset_range = 0\n self.reset_dynamics()\n else:\n self.reset_dynamics()\n\n @property\n def number_of_effectors(self):\n '''\n Return the number of effector positions in the arm (number of joints + end effector).\n '''\n return len(self.__effector_positions)\n\n @property\n def number_of_angles(self):\n '''\n Return the number of angles (or joints) for control.\n '''\n return len(self.__effector_positions) - 1\n\n @property\n def effector_positions(self):\n '''Return the list of effector positions [(xi,yi)].'''\n return self.__effector_positions.copy()\n\n @effector_positions.setter\n def effector_positions(self, 
val):\n '''Set the effector positions, check for consistency.'''\n self.__effector_positions = val\n self.check_consistency()\n\n @property\n def origin_position(self):\n '''Return the position of the first effector (origin of the arm).'''\n return np.copy(self.effector_positions[0])\n\n @property\n def end_effector_position(self):\n '''Return the position of the end effector.'''\n return np.copy(self.effector_positions[-1])\n\n @property\n def arm_lengths(self):\n '''Return the arm link lenghts'''\n return self.get_arm_lengths()\n\n @property\n def dynamics_params(self):\n '''Return the dynamics parameters controlling the dynamics of the arm.'''\n return np.copy(self.__dynamics_params)\n\n @property\n def effective_dynamics(self):\n '''Return the dynamics for each joint angle (multiplicative bias).'''\n return np.copy(self.__effective_dynamics)\n\n def effector_positions_vector(self, reverse=True):\n '''Return a numpy array of the concatenated effector positions.'''\n if (reverse):\n # Enf-effector first\n return np.concatenate(self.__effector_positions[::-1])\n else:\n # Origin first\n return np.concatenate(self.__effector_positions)\n\n def set_dynamics(self, params):\n '''Set the dynamics (both effective and parameters) given a new set of parameters.'''\n self.__dynamics_params = params\n self.__effective_dynamics = self.get_dyns_from_params(params)\n\n def reset_dynamics(self):\n '''Reset the dynamics according to the dynamics reset metadata.'''\n # Check there is enough dynamics parameters to get the effective dynamics\n assert len(self.__effector_positions) <= self.dynamics_params_no\n\n dyn_params = []\n for i in range(self.dynamics_params_no):\n dyn_params.append(self.dynamics_reset_range * np.random.rand(1)[0] + self.dynamics_reset_min)\n\n self.set_dynamics(np.array(dyn_params))\n\n def get_dyns_from_params(self, dynamics_params, batch=False):\n ''' Calculate the effective dynamics using the dynamics parameters in some arbitrary way.\n If batch= True, 
calculated it for a batch of dynamics parameters, where dim = 0 is the batch dimention\n '''\n if(batch):\n dyn = []\n i = 0\n while len(dyn) < len(self.__effector_positions) - 1:\n dyn.append((((dynamics_params[:, i] + dynamics_params[:,i + 1]) * dynamics_params[:,i]) / dynamics_params[:,-1 - i]).reshape((-1,1)))\n i+=1\n return np.concatenate(dyn, axis=1)\n else:\n dyn = []\n i = 0\n while len(dyn) < len(self.__effector_positions) - 1:\n dyn.append(\n ((dynamics_params[i] + dynamics_params[i + 1]) * dynamics_params[i]) / dynamics_params[-1 - i])\n i += 1\n return np.array(dyn)\n\n\n def effective_dynamics_min(self):\n ''' Return the minimum possible value of the effective dynamics given the parameter reset metadata.'''\n return (2 * self.dynamics_reset_min * self.dynamics_reset_min) / (\n self.dynamics_reset_min + self.dynamics_reset_range)\n\n def effective_dynamics_max(self):\n ''' Return the minimum possible value of the effective dynamics given the parameter reset metadata.'''\n dyns_reset_max = self.dynamics_reset_min+self.dynamics_reset_range\n return (2 * dyns_reset_max * dyns_reset_max) / (self.dynamics_reset_min)\n\n def check_consistency(self):\n '''Check that the robot effectors are in the world and that they can reach everywhere within the arm's circle of reach.'''\n\n # All positions must be between 0 and 1\n for pos in self.effector_positions:\n if( pos[0]<0 or pos[0]>1 or pos[1]<0 or pos[1]>1):\n raise ValueError('The robot effector positions are out of range')\n\n # The length of the largest arm needs to be smaller than the sum of the rest of the arms\n arm_lengths = self.arm_lengths\n max_idx = np.argmax(arm_lengths)\n max_len = arm_lengths[max_idx]\n sum_lens = np.sum(arm_lengths[np.arange(len(arm_lengths)) != max_idx])\n if ((not np.isclose(max_len, sum_lens) and (max_len > sum_lens))):\n raise ValueError('The robot arm cannot reach everywhere in its disk')\n\n def link_vectors(self, positions=None):\n '''\n Return a list of arm link vectors.\n 
:param positions: If effector positions are passed as an argument, return the link vectors of this list instead.\n '''\n link_vectors = []\n\n if (positions is not None):\n j_p = positions\n else:\n j_p = self.effector_positions\n\n for i in range(len(self.effector_positions) - 1):\n # The link vectors are the difference in the positions of the effectors\n link_vectors.append(j_p[i + 1] - j_p[i])\n\n return link_vectors\n\n def get_arm_lengths(self, external_positions=None):\n '''\n Return the arm link lenghts.\n :param external_positions: If effector positions are passed as an argument, return the arm lenghts of this list instead.\n '''\n arm_lengths = []\n\n if (external_positions is None):\n j_pos = self.effector_positions\n else:\n j_pos = external_positions\n\n for i in range(len(j_pos) - 1):\n # The norm of the vector between the two effectors in cartesian coordinates\n arm_lengths.append(np.linalg.norm(j_pos[i + 1] - j_pos[i]))\n return np.array(arm_lengths)\n\n\n def joint_angles(self, positions=None):\n '''\n Retun the joint angles of the current robot configuration.\n :param positions: If a list of effector positions is passed as an argument, return the joint angles of this list instead.\n '''\n if (positions is not None):\n links = self.link_vectors(positions)\n else:\n links = self.link_vectors()\n joint_angles = []\n original_frame_angles = []\n\n # The first joint angle is the signed arctan of the x,y coordinates of the first link vector\n first_angle = np.arctan2(links[0][1], links[0][0])\n joint_angles.append(first_angle)\n original_frame_angles.append(first_angle)\n\n for i in range(1, len(links)):\n # Each joint angle is the angle with the x-axis - the previous angle with the x-axis\n frame_angle = np.arctan2(links[i][1], links[i][0])\n angle = frame_angle - original_frame_angles[-1]\n original_frame_angles.append(frame_angle)\n joint_angles.append(angle)\n\n return np.array(joint_angles)\n\n def effector_pos_from_angles(self, joint_angles, batch= 
False):\n '''\n Infer the effector positions required to produced the given joint angles by forward kinematics.\n :param batch: Do it batch mode, where dim = 0 is the batch dimention\n :param joint_angles: Joint angles, list or array of lengths the number of effectors-1.\n '''\n\n if(batch):\n # Check that the list of joint_angles has the right length\n assert len(joint_angles) == len(self.effector_positions) - 1\n arm_lengths = self.arm_lengths\n\n effector_pos = [np.zeros((joint_angles[0].size,2))]\n cumulative_angle = np.zeros((joint_angles[0].size,1))\n for i in range(len(arm_lengths)):\n # Working out the forward kinematics, placing the first link at the origin\n angle = joint_angles[i] + cumulative_angle\n cumulative_angle += joint_angles[i]\n calc_pos = effector_pos[-1] + arm_lengths[i] * np.concatenate([np.cos(angle), np.sin(angle)],axis=1)\n effector_pos.append(calc_pos)\n\n # Adding the origin position to all the effector positions\n effector_pos = [jp + self.effector_positions[0] for jp in effector_pos]\n\n return effector_pos\n else:\n # Check that the list of joint_angles has the right length\n assert len(joint_angles) == len(self.effector_positions) - 1\n arm_lengths = self.arm_lengths\n\n effector_pos = [np.array([0, 0])]\n cumulative_angle = 0\n for i in range(len(arm_lengths)):\n # Working out the forward kinematics, placing the first link at the origin\n angle = joint_angles[i] + cumulative_angle\n cumulative_angle += joint_angles[i]\n calc_pos = effector_pos[-1] + arm_lengths[i] * np.array([np.cos(angle), np.sin(angle)])\n effector_pos.append(calc_pos)\n\n # Adding the origin position to all the effector positions\n effector_pos = [jp + self.effector_positions[0] for jp in effector_pos]\n\n return effector_pos\n\n def apply_angle_changes(self,angle_changes, initial_angles, dynamics, batch = False):\n '''\n Apply angle changes accroding to specified dynamcis.\n :param batch: Do it batch mode, where dim = 0 is the batch dimention\n :return: 
new_angles = initial_angles + angle_changes*initial_angles\n '''\n if(batch):\n new_angles = [(initial_angles[:,i]+angle_changes[:,i]*dynamics[:,i]).reshape(-1,1) for i in range(angle_changes.shape[1])]\n new_effector_pos = self.effector_pos_from_angles(new_angles, batch)\n\n return new_effector_pos, new_angles\n else:\n new_angles = [initial_angles[i] + angle_changes[i] * dynamics[i] for i in range(len(angle_changes))]\n new_effector_pos = self.effector_pos_from_angles(new_angles)\n\n return new_effector_pos, new_angles\n\n\n def move_joints(self, angle_changes, external_dynamics_params=None):\n '''\n Move the joints of the arm by some given angle change, subject to the effective dynamics of the arm.\n :param angle_changes: List or array with the amount in radiants by which to move each joint.\n :param external_dynamics_params: If present, use these dynamics parameters to move the joint.\n '''\n # Verify that the angle array given is of the correct size\n assert len(angle_changes) == len(self.effector_positions) - 1\n\n if (external_dynamics_params is not None):\n use_dyns = self.get_dyns_from_params(external_dynamics_params)\n else:\n use_dyns = self.__effective_dynamics\n\n # Get previous joint angles, add the the changes, run forward kinematics and apply the changes\n j_a = self.joint_angles()\n\n #j_a = [j_a[i] + angle_changes[i] * use_dyns[i] for i in range(len(angle_changes))]\n\n #new_join_pos = self.effector_pos_from_angles(j_a)\n new_join_pos, _ = self.apply_angle_changes(angle_changes,j_a,use_dyns)\n\n assert (new_join_pos[0] == self.effector_positions[0]).all()\n\n self.effector_positions = new_join_pos\n\n def modulate_angle(self, angle):\n '''Map the given angle betwwen pi and -pi'''\n twopi = 2 * np.pi\n angle = angle % twopi\n\n angle = (angle + twopi) % twopi\n\n if (angle > np.pi):\n angle -= twopi\n\n return angle\n\n def reach_check(self, goal_pos):\n '''Verify that the goal is within the reaching disk of the arm'''\n if (np.linalg.norm(goal_pos 
- self.origin_position) > np.sum(self.arm_lengths)):\n return False\n\n return True\n\n def full_strech_positions(self, goal_pos):\n '''Return the effector positions of a fully stretched arm in the direction of the specified goal'''\n arm_lengths = self.arm_lengths\n\n vec = goal_pos - self.origin_position\n dir = vec / np.linalg.norm(vec)\n\n positions = [self.origin_position]\n\n cumulative_length = 0\n for l in arm_lengths:\n cumulative_length += l\n positions.append(dir * cumulative_length + self.origin_position)\n\n return positions\n\n def fabrik(self, goal_pos, threshold=0.0000001, external_positions=None):\n '''\n Use the FABRIK algorithm solve the inverse kinematics and get the effector positions where the goal is reached.\n :param goal_pos: The goal position to reach.\n :param threshold: Threshold in terms of euclidean distance for considering the goal reached.\n :param external_positions: If external positions are supplied, use these as the arm's initial effector positions instead.\n :return: The list of effector positions such that, starting at the current (or external) positions, the arm now touches the goal.\n '''\n\n if (not self.reach_check(goal_pos)):\n return self.full_strech_positions(goal_pos)\n\n if (external_positions is None):\n positions = self.effector_positions\n arm_lengths = self.arm_lengths\n else:\n positions = external_positions\n arm_lengths = self.get_arm_lengths(external_positions)\n\n max_idx = len(positions) - 1\n while (np.linalg.norm(goal_pos - positions[-1]) > threshold):\n\n # Starting from the goal position, work backwards and calculate intermediate effector positions.\n tmp_effector_pos = collections.deque()\n tmp_effector_pos.append(goal_pos)\n\n for m in range(max_idx - 1, -1, -1):\n vec = positions[m] - tmp_effector_pos[0]\n tmp_pos = vec / np.linalg.norm(vec) * arm_lengths[m] + tmp_effector_pos[0]\n tmp_effector_pos.appendleft(tmp_pos)\n\n # Starting from the origin, adjust the effector positions.\n for m in range(1, 
max_idx + 1):\n vec = tmp_effector_pos[m] - positions[m - 1]\n positions[m] = vec / np.linalg.norm(vec) * arm_lengths[m - 1] + positions[m - 1]\n\n return positions\n\n def get_angle_actions(self, new_positions, initial_positions=None, iterations=1, correct_for_dynamics=False,\n external_dynamics=None):\n '''\n Given a set of new effector positions, calculate a joint angles differential making the arm move towards this\n new configuration.\n :param new_positions: New effector positions - List of numpy arrays of length 2\n :param initial_positions: If given, use these as the initial effector positions to calculate the angle differences.\n :param iterations: Number of times the angle differencial should be applied to reach the end position from the\n start position.\n :param correct_for_dynamics: Whether to calculate the angle differences to nullify the effect of the dynamics of the arm.\n :param external_dynamics: If supplied, treat the external dynamics as the dynamics parameters of the arm.\n :return:\n '''\n if (correct_for_dynamics):\n dyns = self.effective_dynamics\n else:\n dyns = np.array([1] * (len(self.effector_positions) - 1))\n\n if (external_dynamics is not None):\n dyns = self.get_dyns_from_params(external_dynamics)\n\n initial_j_a = self.joint_angles(initial_positions)\n final_j_a = self.joint_angles(new_positions)\n return np.array([self.modulate_angle(final_j_a[i] - initial_j_a[i]) / (iterations * dyns[i]) for i in\n range(len(initial_j_a))])\n\n\n\n\n" ]
[ [ "numpy.linalg.norm", "numpy.cos", "numpy.sin", "numpy.arctan2", "numpy.concatenate", "numpy.copy", "numpy.argmax", "numpy.random.rand", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pyplanes/pyplanes
[ "0b69ac4cfff0d278497fe2ad5ae096721c983f6f" ]
[ "pyplanes/mesh/edge.py" ]
[ "#! /usr/bin/env python3\n# -*- coding:utf8 -*-\n#\n# edge.py\n#\n# This file is part of pyplanes, a software distributed under the MIT license.\n# For any question, please contact [email protected].\n#\n# Copyright (c) 2018 The pyplanes authors\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n\nimport numpy as np\nfrom numpy.lib.scimath import sqrt\n\n\nclass Edge(object):\n \"\"\"\n Holds an edge (2 nodes) and its labels\n\n Attributes\n ----------\n n1, n2 : Node instance\n Both nodes at the ends of the edge\n labels : list[str]\n labels applied to the edge\n\n Parameters\n ----------\n n1, n2 : Node instance\n Both nodes at the ends of the edge\n labels : list[str]\n labels applied to the edge\n \"\"\"\n\n def __init__(self, n1, n2, labels=None):\n\n assert n1.dim == n2.dim, 'Both the Node objects passed to Edge must have the same dimension'\n\n self.n1 = n1\n self.n2 = n2\n self.labels = [] if labels is None else labels\n\n def length(self):\n \"\"\"\n Compute the length of the edge\n\n Return\n ------\n length : float\n length of the segment\n \"\"\"\n return sqrt(self.n1.coords**2 - self.n2.coords**2)\n" ]
[ [ "numpy.lib.scimath.sqrt" ] ]
[ { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
anacost/tuning_guit
[ "cb6a4294f102b4ee21b95bdba7f49079bd1cc690" ]
[ "tuning_recorded.py" ]
[ "import sounddevice as sd\nfrom scipy.io.wavfile import write\nimport librosa\n\nfs = 44100 #Sample rate\nseconds = 4 #duration of recording\n\nprint('start recording')\nmyrecording = sd.rec(int(seconds*fs), samplerate=fs, channels=1)\nsd.wait() #wait until recording is finished\nprint('finished recording')\nwrite('output.wav', fs, myrecording) #save as wav file\ny, sr =librosa.load('output.wav')\nlibrosa.yin(y, fmin=310, fmax=400)\n\n# Guitar strings are E2=82.41Hz, A2=110Hz, D3=146.8Hz, G3=196Hz, B3=246.9Hz, E4=329.6Hz" ]
[ [ "scipy.io.wavfile.write" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
Addy81/hla_assimilator
[ "845e0958d58185c4bcb12a38fdffc3708456d118" ]
[ "archive/NIHR_Assimilator/NIHR_Assimilator/user_uploads/wooey_scripts/wooey_assimilate_tkAUeI5.py" ]
[ "#!/usr/bin/python\n#\n#\n#\n#\n# Adriana Toutoudaki (October 2018), contact: [email protected]\n\nimport pandas as pd\nimport re\nimport sys\nimport numpy as np\nimport argparse\n\n\nparser = argparse.ArgumentParser(description='Assimilate low res HLA type data.')\nparser.add_argument('input', help='input file .xlsx to be analysed')\nparser.add_argument('output', help='output file file name to be generated')\n\n\n#parser.add_argument('f', type=argparse.FileType('r'))\nargs = parser.parse_args()\n\ninput_file =args.input\n#print (input_file)\n\noutput_file = args.output\n\ndata = pd.read_excel(input_file, \"Main data\")\n\nrules_file = './HLA_rules.xlsx'\n\n\n#data = pd.read_excel(data_file, \"Main data\")\n#rules = pd.read_excel(rules_file)\n\n# print (data)\n# make rules lists\n#A_LR = (rules.loc[:20,'HLA-A']).tolist()\n#A_HR = (rules.loc[:20,'Assimilation']).tolist()\n\n\n# fill_split function iterates through the columns containing the HLA data and fills the split column\n# with the equivalent broad if empty\n\ndef fill_split(patient):\n gene_list= [\"A\",\"B\",\"C\",\"DR\",\"DP\",\"DQ\"]\n for gene in gene_list:\n for column in data.columns:\n first_split = patient + '_First_' + gene + '_Split'\n first_broad = patient + '_First_' + gene + '_Broad'\n second_split = patient + '_Second_' + gene + '_Split'\n second_broad = patient + '_Second_' + gene + '_Broad'\n\n #Match column names above to select the correct one to populate\n\n x = re.match(first_split, column)\n y = re.match(second_split, column)\n\n if x:\n data[first_split] = data[first_split].fillna(data[first_broad])\n elif y:\n data[second_split] = data[second_split].fillna(data[second_broad])\n\n\n# Run function for Recipient and Donor data\nfill_split('Recip')\nfill_split('Donor')\n\n\n# hard-coded list of rules. 
This can be parsed to the script instead if preferred.\n\n# A_LR = rules.loc[:20,'A_LR'].tolist()\n# A_HR = rules.loc[:20,'A_HR'].tolist()\nA_LR = ['A1', 'A2', 'A3', 'A11', 'A23', 'A24', 'A25', 'A26', 'A29', 'A30', 'A31', 'A32', 'A33', 'A34', 'A36', 'A43', 'A66', 'A68', 'A69', 'A74', 'A80']\nA_HR = ['A*01:01', 'A*02:01', 'A*03:01', 'A*11:01', 'A*23:01', 'A*24:02', 'A*25:01', 'A*26:01', 'A*29:02', 'A*30:01', 'A*31:01', 'A*32:01', 'A*33:01', 'A*34:01', 'A*36:01', 'A*43:01', 'A*66:01', 'A*68:01', 'A*69:01', 'A*74:01', 'A*80:01']\n\n# B_LR = rules.loc[:45,'B_LR'].tolist()\n# B_HR = rules.loc[:45,'B_HR'].tolist()\nB_LR = ['B7', 'B8', 'B13', 'B64', 'B65', 'B62', 'B75', 'B72', 'B71', 'B76', 'B77', 'B63', 'B18', 'B27', 'B35', 'B37', 'B38', 'B39', 'B60', 'B61', 'B40', 'B41', 'B42', 'B44', 'B45', 'B46', 'B47', 'B48', 'B49', 'B50', 'B51', 'B52', 'B53', 'B54', 'B55', 'B56', 'B57', 'B58', 'B59', 'B67', 'B73', 'B78', 'B81', 'B82']\nB_HR = ['B*07:02', 'B*08:01', 'B*13:01', 'B*14:01', 'B*14:02', 'B*15:01', 'B*15:02', 'B*15:03', 'B*15:10', 'B*15:12', 'B*15:13', 'B*15:16', 'B*18:01', 'B*27:05', 'B*35:01', 'B*37:01', 'B*38:01', 'B*39:01', 'B*40:01', 'B*40:02', 'B*40:05', 'B*41:01', 'B*42:01', 'B*44:02', 'B*45:01', 'B*46:01', 'B*47:01', 'B*48:01', 'B*49:01', 'B*50:01', 'B*51:01', 'B*52:01', 'B*53:01', 'B*54:01', 'B*55:01', 'B*56:01', 'B*57:01', 'B*58:01', 'B*59:01', 'B*67:01', 'B*73:01', 'B*78:01', 'B*81:01', 'B*82:01']\n\n# C_LR = rules.loc[:17,'C_LR'].tolist()\n# C_HR = rules.loc[:17,'C_HR'].tolist()\n\n# C rules- simple. 
The Cw/B pairings need to be added.\nC_LR = [\"Cw1\",\"Cw2\",\"Cw4\",\"Cw9\",\"Cw5\",\"Cw6\",\"Cw12\",\"Cw14\",\"Cw15\",\"Cw17\",\"Cw18\",]\nC_HR = [\"C*01:02\",\"C*02:02\",\"C*04:01\",\"C*03:03\",\"C*05:01\",\"C*06:02\",\"C*12:03\",\"C*14:02\",\"C*15:02\",\"C*17:01\",\"C*18:01\",]\n\n\n# replace\n\n# Copy split column and rename it to with an HR_ prefix\nfor column in data.columns:\n column_check = re.search('_Split', column)\n col_index = data.columns.get_loc(column)\n new_column_name = \"HR_\" + column\n if column_check:\n data.insert((col_index + 1), new_column_name, data[column])\n\n# when there's the first Cw present - assume homozygous so fill the Second Split column\n\n\ndef fill_hom(patient, gene):\n first = 'HR_' + patient + '_First_'+ gene + '_Split'\n second = 'HR_' + patient + '_Second_'+ gene + '_Split'\n\n for column in data.columns:\n f = re.match(second, column)\n if f:\n data[second] = data[second].fillna(data[first])\n else:\n pass\n\n\nfill_hom('Recip', 'C')\nfill_hom('Donor', 'C')\n\nfill_hom('Recip', 'DQ')\nfill_hom('Donor', 'DQ')\n\n# Replace low-res alleles in the HR_ columns using the rule lists above\n\nfor column in data.columns:\n column_check = re.match('HR_', column)\n if column_check:\n data[column].replace(to_replace = A_LR, value = A_HR, inplace = True)\n data[column].replace(to_replace = B_LR, value = B_HR, inplace = True)\n data[column].replace(to_replace = C_LR, value = C_HR, inplace = True)\n else:\n pass\n\n\n# special Cw/B pairing replacement\n# Function that replaces Cw alleles based on the B allele correlation\n\n\nrows = data.shape[0]\n\ndef c_assimilation(to_replace, general_rule, exc1, exc2=(None, None),exc3=(None, None)):\n for patient in ['Recip', 'Donor']:\n for variable in ['First', 'Second']:\n #rows = data.shape[0]\n for row in range(rows):\n c_col = \"HR_\" + patient + '_' + variable + '_C_Split'\n b1_col = \"HR_\" + patient + '_First_B_Split'\n b2_col = \"HR_\" + patient + '_Second_B_Split'\n\n c = data.loc[row, 
c_col]\n b1 = data.iloc[row][b1_col]\n b2 = data.iloc[row][b2_col]\n\n if pd.isnull(data.loc[row, c_col]):\n pass\n elif c == to_replace and ((b1 == exc1[0]) or (b2 == exc1[0])):\n data.loc[row, c_col] = re.sub(to_replace, exc1[1], c)\n elif c == to_replace and ((b1 == exc2[0]) or (b2 == exc2[0])):\n data.loc[row, c_col] = re.sub(to_replace, exc2[1], c)\n elif c == to_replace and ((b1 == exc3[0]) or (b2 == exc3[0])):\n data.loc[row, c_col] = re.sub(to_replace, exc2[1], c)\n else:\n data.loc[row, c_col] = re.sub(to_replace, general_rule, c)\n\n\n# Function that replaces Cw alleles based on the B allele correlation\n# format of function is\n# to_replace - CwX : the low res allele to be substituted\n# general_rule : C*xx:xx :the most common high-res allele\n# exc1,exc2,exc3: tuple that contains the associated B allele with the special C to replace. (B*xx:xx,C*xx:xx)\n\n\nc_assimilation(to_replace='Cw10', general_rule='C*03:02', exc1=('B*40:01', 'C*03:04'), exc2=('B*15:01', 'C*03:04'))\nc_assimilation(to_replace='Cw8', general_rule='C*08:01', exc1=('B*14:01', 'C*08:02'), exc2=('B*14:02', 'C*08:02'))\nc_assimilation(to_replace='Cw16', general_rule='C*16:01', exc1=('B*44:03', 'C*16:02'), exc2=('B*55:01', 'C*16:02'))\nc_assimilation(to_replace='Cw7', general_rule='C*07:01', exc1=('B*07:02', 'C*07:02'))\n\n\n#add a DR51/52/53 column\n\ncolumn_patterns = ['Recip_First', 'Recip_Second', 'Donor_First', 'Donor_Second']\n\nfor c_pat in column_patterns:\n for column in data.columns:\n dr_pattern = 'HR_' + c_pat + '_DR_Split'\n column_match = re.match(dr_pattern, column)\n col_index = data.columns.get_loc(dr_pattern)\n if column_match:\n new_dr_column = 'HR_' + c_pat + '_DRB3/4/5'\n data.insert((col_index+1),new_dr_column,np.nan)\n\nfor c_pat in column_patterns:\n for column in data.columns:\n dqa_pattern = 'HR_' + c_pat + '_DQ_Split'\n column_match = re.match(dqa_pattern, column)\n col_index = data.columns.get_loc(dqa_pattern)\n if column_match:\n new_dq_column = 'HR_' + c_pat + 
'_DQA'\n data.insert((col_index + 1), new_dq_column, np.nan)\n\nrules = pd.read_excel('classII_rules.xlsx')\n\n# create a list containing all class II rules\n\n\nclassII = []\nrule_rows = rules.shape[0]\nfor row in range(rule_rows):\n row_sublist = []\n for column in rules.columns:\n row_sublist.append(rules.loc[row][column])\n classII.append(row_sublist)\n\n\n''''\nfor c_pat in column_patterns:\n dr_col = 'HR_' + c_pat + '_DR_Split'\n dq_col = 'HR_' + c_pat + '_DQ_Split'\n drb_col = 'HR_' + c_pat + '_DRB3/4/5'\n dqa_col = 'HR_' + c_pat + '_DQA'\n\n for row in range(rows):\n dr = data.loc[row, dr_col]\n dq = data.loc[row, dq_col]\n\n for rule in classII:\n if pd.isnull(data.loc[row, dr_col])\n pass\n elif dr == rule[0] and dq == rule[4]:\n\n'''\n\n# save file into a different excel file\nwriter = pd.ExcelWriter(output_file, engine = 'xlsxwriter')\n\ndata.to_excel(writer,sheet_name='Main data')\nwriter.save()" ]
[ [ "pandas.read_excel", "pandas.isnull", "pandas.ExcelWriter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
cfwelch/longitudinal_dialog
[ "9f2de780026565df6447301a134a3f2126b0e64b" ]
[ "equal_bin_response_times.py" ]
[ "\n\nimport msgpack, nltk, math, os\nimport dateutil.parser\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tqdm import tqdm\nfrom utils import DEFAULT_TIMEZONE, settings\n\nSPLIT_INTO = 5\n\n# To split into 5 bins we will need 39315.6 points per bin.\n# The bin cutoffs are: [-1, 7, 21, 59, 256, 266937297]\n# \tBin 1: 39874\n# \tBin 2: 40384\n# \tBin 3: 38101\n# \tBin 4: 38925\n# \tBin 5: 39294\n\ndef main():\n # Read all the data\n list_dir_set = os.listdir(settings['DATA_MERGE_DIR'])\n response_times = []\n count = 0\n zeros = 0\n for filename in list_dir_set:\n count += 1\n print('File (' + str(count) + '/' + str(len(list_dir_set)) + '): ' + filename)\n convo = None\n with open(settings['DATA_MERGE_DIR'] + '/' + filename, 'rb') as handle:\n convo = msgpack.unpackb(handle.read())\n cname = convo[b'with'].decode()\n\n prev = None\n prev_date = None\n for message in tqdm(convo[b'messages']):\n if b'text' not in message:\n continue\n cur_speaker = message[b'user'].decode()\n mdate = dateutil.parser.parse(message[b'date'])\n if cur_speaker in settings['my_name'] and prev != None and prev not in settings['my_name']:\n #msg_text = message[b'text']\n if mdate.tzinfo == None:\n mdate = DEFAULT_TIMEZONE.localize(mdate)\n td = (mdate - prev_date).seconds + (mdate - prev_date).days*24*60*60 if prev_date != None else 0\n if td > 0:\n response_times.append(td)\n else:\n zeros += 1\n prev = cur_speaker\n prev_date = mdate\n\n print('Number of points where turn changes to you: ' + str(len(response_times)))\n #print('Number of times where td=0: ' + str(zeros))\n pts_per_bin = len(response_times) * 1.0 / SPLIT_INTO\n print('To split into ' + str(SPLIT_INTO) + ' bins we will need ' + str(pts_per_bin) + ' points per bin.')\n\n rps = np.array(response_times)\n bin_counter = 0\n cutoffs = [-1]\n for secs in range(0, max(response_times)):\n if np.sum(rps <= secs) >= (bin_counter + 1) * pts_per_bin:\n cutoffs.append(secs)\n bin_counter += 1\n if len(cutoffs) == 
SPLIT_INTO:\n break\n cutoffs.append(max(response_times))\n\n print('The bin cutoffs are: ' + str(cutoffs))\n for i in range(SPLIT_INTO):\n print('\\tBin ' + str(i+1) + ': ' + str(np.sum((rps > cutoffs[i]) & (rps <= cutoffs[i+1]))))\n\n #response_times = [math.log(i) for i in response_times]\n #plt.hist(response_times, bins=100)\n #plt.show()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rterbush/mljar-supervised
[ "a85c90f7be59278bf856b0665380954890053989" ]
[ "supervised/tuner/mljar_tuner.py" ]
[ "import os\nimport copy\nimport json\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.preprocessing import OneHotEncoder\n\nfrom supervised.tuner.random_parameters import RandomParameters\nfrom supervised.algorithms.registry import AlgorithmsRegistry\nfrom supervised.preprocessing.preprocessing_categorical import PreprocessingCategorical\nfrom supervised.tuner.preprocessing_tuner import PreprocessingTuner\nfrom supervised.tuner.hill_climbing import HillClimbing\nfrom supervised.algorithms.registry import (\n BINARY_CLASSIFICATION,\n MULTICLASS_CLASSIFICATION,\n REGRESSION,\n)\n\nimport logging\nfrom supervised.utils.config import LOG_LEVEL\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(LOG_LEVEL)\n\n\nclass MljarTuner:\n def __init__(\n self,\n tuner_params,\n algorithms,\n ml_task,\n validation_strategy,\n explain_level,\n data_info,\n golden_features,\n features_selection,\n train_ensemble,\n stack_models,\n adjust_validation,\n boost_on_errors,\n kmeans_features,\n mix_encoding,\n optuna_time_budget,\n optuna_init_params,\n n_jobs,\n seed,\n ):\n logger.debug(\"MljarTuner.__init__\")\n self._start_random_models = tuner_params.get(\"start_random_models\", 5)\n self._hill_climbing_steps = tuner_params.get(\"hill_climbing_steps\", 3)\n self._top_models_to_improve = tuner_params.get(\"top_models_to_improve\", 3)\n self._algorithms = algorithms\n self._ml_task = ml_task\n self._validation_strategy = validation_strategy\n self._explain_level = explain_level\n self._data_info = data_info\n self._golden_features = golden_features\n self._features_selection = features_selection\n self._train_ensemble = train_ensemble\n self._stack_models = stack_models\n self._adjust_validation = adjust_validation\n self._boost_on_errors = boost_on_errors\n self._kmeans_features = kmeans_features\n self._mix_encoding = mix_encoding\n self._optuna_time_budget = optuna_time_budget\n self._optuna_init_params = optuna_init_params\n self._n_jobs = n_jobs\n self._seed = 
seed\n self._unique_params_keys = []\n\n def _apply_categorical_strategies(self):\n if self._data_info is None:\n return []\n if self._data_info.get(\"columns_info\") is None:\n return []\n\n strategies = []\n for k, v in self._data_info[\"columns_info\"].items():\n # if (\n # \"categorical\" in v\n # and PreprocessingTuner.CATEGORICALS_LOO not in strategies\n # ):\n # strategies += [PreprocessingTuner.CATEGORICALS_LOO]\n\n if (\n PreprocessingCategorical.FEW_CATEGORIES in v\n and PreprocessingTuner.CATEGORICALS_MIX not in strategies\n and self._mix_encoding\n ):\n strategies += [PreprocessingTuner.CATEGORICALS_MIX]\n\n if len(strategies) == 1: # disable loo encoding\n # cant add more\n # stop\n break\n\n return strategies\n\n def _can_apply_kmeans_features(self):\n if self._data_info is None:\n return False\n\n # are there any continous\n continous_cols = 0\n for k, v in self._data_info[\"columns_info\"].items():\n if \"categorical\" not in v:\n continous_cols += 1\n\n # too little columns\n if continous_cols == 0:\n return False\n\n # too many columns\n if continous_cols > 300:\n return False\n\n # all good, can apply kmeans\n return True\n\n def _can_apply_golden_features(self):\n if self._data_info is None:\n return False\n\n # are there any continous\n continous_cols = 0\n for k, v in self._data_info[\"columns_info\"].items():\n if \"categorical\" not in v:\n continous_cols += 1\n\n # too little columns\n if continous_cols == 0:\n return False\n\n # all good, can apply golden features\n return True\n\n def steps(self):\n\n all_steps = []\n if self._adjust_validation:\n all_steps += [\"adjust_validation\"]\n\n all_steps += [\"simple_algorithms\", \"default_algorithms\"]\n\n if self._start_random_models > 1:\n all_steps += [\"not_so_random\"]\n\n categorical_strategies = self._apply_categorical_strategies()\n if PreprocessingTuner.CATEGORICALS_MIX in categorical_strategies:\n all_steps += [\"mix_encoding\"]\n if PreprocessingTuner.CATEGORICALS_LOO in 
categorical_strategies:\n all_steps += [\"loo_encoding\"]\n if self._golden_features and self._can_apply_golden_features():\n all_steps += [\"golden_features\"]\n if self._kmeans_features and self._can_apply_kmeans_features():\n all_steps += [\"kmeans_features\"]\n if self._features_selection:\n all_steps += [\"insert_random_feature\"]\n all_steps += [\"features_selection\"]\n for i in range(self._hill_climbing_steps):\n all_steps += [f\"hill_climbing_{i+1}\"]\n if self._boost_on_errors:\n all_steps += [\"boost_on_errors\"]\n if self._train_ensemble:\n all_steps += [\"ensemble\"]\n if self._stack_models:\n all_steps += [\"stack\"]\n if self._train_ensemble:\n all_steps += [\"ensemble_stacked\"]\n return all_steps\n\n def get_model_name(self, model_type, models_cnt, special=\"\"):\n return f\"{models_cnt}_\" + special + model_type.replace(\" \", \"\")\n\n def filter_random_feature_model(self, models):\n return [m for m in models if \"RandomFeature\" not in m.get_name()]\n\n def generate_params(\n self, step, models, results_path, stacked_models, total_time_limit\n ):\n try:\n models_cnt = len(models)\n if step == \"adjust_validation\":\n return self.adjust_validation_params(models_cnt)\n elif step == \"simple_algorithms\":\n return self.simple_algorithms_params(models_cnt)\n elif step == \"default_algorithms\":\n return self.default_params(models_cnt)\n elif step == \"not_so_random\":\n return self.get_not_so_random_params(models_cnt)\n elif step == \"mix_encoding\":\n return self.get_mix_categorical_strategy(models, total_time_limit)\n elif step == \"loo_encoding\":\n return self.get_loo_categorical_strategy(models, total_time_limit)\n elif step == \"golden_features\":\n return self.get_golden_features_params(\n models, results_path, total_time_limit\n )\n elif step == \"kmeans_features\":\n return self.get_kmeans_features_params(\n models, results_path, total_time_limit\n )\n elif step == \"insert_random_feature\":\n return 
self.get_params_to_insert_random_feature(\n models, total_time_limit\n )\n elif step == \"features_selection\":\n return self.get_features_selection_params(\n self.filter_random_feature_model(models),\n results_path,\n total_time_limit,\n )\n elif \"hill_climbing\" in step:\n return self.get_hill_climbing_params(\n self.filter_random_feature_model(models)\n )\n elif step == \"boost_on_errors\":\n return self.boost_params(models, results_path, total_time_limit)\n elif step == \"ensemble\":\n return [\n {\n \"model_type\": \"ensemble\",\n \"is_stacked\": False,\n \"name\": \"Ensemble\",\n \"status\": \"initialized\",\n \"final_loss\": None,\n \"train_time\": None,\n }\n ]\n elif step == \"stack\":\n return self.get_params_stack_models(stacked_models)\n elif step == \"ensemble_stacked\":\n\n # do we have stacked models?\n any_stacked = False\n for m in models:\n if m._is_stacked:\n any_stacked = True\n if not any_stacked:\n return []\n\n return [\n {\n \"model_type\": \"ensemble\",\n \"is_stacked\": True,\n \"name\": \"Ensemble_Stacked\",\n \"status\": \"initialized\",\n \"final_loss\": None,\n \"train_time\": None,\n }\n ]\n\n # didnt find anything matching the step, return empty array\n return []\n except Exception as e:\n return []\n\n def get_params_stack_models(self, stacked_models):\n if stacked_models is None or len(stacked_models) == 0:\n return []\n\n X_train_stacked_path = \"\"\n added_columns = []\n\n model_types = [\"Xgboost\", \"LightGBM\", \"CatBoost\"]\n generated_params = {m: [] for m in model_types}\n types_score_order = []\n # resue old params\n for m in stacked_models:\n # use only Xgboost, LightGBM and CatBoost as stacked models\n if m.get_type() not in model_types:\n continue\n\n if m.get_type() not in types_score_order:\n types_score_order += [m.get_type()]\n\n if m.params.get(\"injected_sample_weight\", False):\n # dont use boost_on_errors model for stacking\n # there will be additional boost_on_errors step\n continue\n\n params = 
copy.deepcopy(m.params)\n\n params[\"validation_strategy\"][\"X_path\"] = params[\"validation_strategy\"][\n \"X_path\"\n ].replace(\"X.parquet\", \"X_stacked.parquet\")\n\n params[\"name\"] = params[\"name\"] + \"_Stacked\"\n params[\"is_stacked\"] = True\n params[\"status\"] = \"initialized\"\n params[\"final_loss\"] = None\n params[\"train_time\"] = None\n params[\"data_type\"] += \"_stacked\"\n if self._optuna_time_budget is not None:\n params[\"optuna_time_budget\"] = self._optuna_time_budget\n params[\"optuna_init_params\"] = self._optuna_init_params\n\n if \"model_architecture_json\" in params[\"learner\"]:\n # the new model will be created with wider input size\n del params[\"learner\"][\"model_architecture_json\"]\n\n if self._ml_task == REGRESSION:\n # scale added predictions in regression if the target was scaled (in the case of NN)\n # this piece of code might not work, leave it as it is, because NN is not used for training with Stacked Data\n target_preprocessing = params[\"preprocessing\"][\"target_preprocessing\"]\n scale = None\n if \"scale_log_and_normal\" in target_preprocessing:\n scale = \"scale_log_and_normal\"\n elif \"scale_normal\" in target_preprocessing:\n scale = \"scale_normal\"\n if scale is not None:\n for col in added_columns:\n params[\"preprocessing\"][\"columns_preprocessing\"][col] = [scale]\n\n generated_params[m.get_type()] += [params]\n\n return_params = []\n for i in range(100):\n total = 0\n for m in types_score_order:\n if generated_params[m]:\n return_params += [generated_params[m].pop(0)]\n total += len(generated_params[m])\n if total == 0:\n break\n\n return return_params\n\n def adjust_validation_params(self, models_cnt):\n generated_params = []\n for model_type in [\"Decision Tree\"]:\n models_to_check = 1\n\n logger.info(f\"Generate parameters for {model_type} (#{models_cnt + 1})\")\n params = self._get_model_params(model_type, seed=1)\n if params is None:\n continue\n\n params[\"name\"] = 
self.get_model_name(model_type, models_cnt + 1)\n params[\"status\"] = \"initialized\"\n params[\"final_loss\"] = None\n params[\"train_time\"] = None\n params[\"data_type\"] = \"original\"\n\n unique_params_key = MljarTuner.get_params_key(params)\n if unique_params_key not in self._unique_params_keys:\n generated_params += [params]\n models_cnt += 1\n return generated_params\n\n def simple_algorithms_params(self, models_cnt):\n generated_params = []\n for model_type in [\"Baseline\", \"Decision Tree\", \"Linear\"]:\n if model_type not in self._algorithms:\n continue\n models_to_check = 1\n if model_type == \"Decision Tree\":\n models_to_check = min(3, self._start_random_models)\n for i in range(models_to_check):\n logger.info(f\"Generate parameters for {model_type} (#{models_cnt + 1})\")\n params = self._get_model_params(model_type, seed=i + 1)\n if params is None:\n continue\n\n params[\"name\"] = self.get_model_name(model_type, models_cnt + 1)\n params[\"status\"] = \"initialized\"\n params[\"final_loss\"] = None\n params[\"train_time\"] = None\n params[\"data_type\"] = \"original\"\n\n unique_params_key = MljarTuner.get_params_key(params)\n if unique_params_key not in self._unique_params_keys:\n generated_params += [params]\n models_cnt += 1\n return generated_params\n\n def skip_if_rows_cols_limit(self, model_type):\n\n max_rows_limit = AlgorithmsRegistry.get_max_rows_limit(\n self._ml_task, model_type\n )\n max_cols_limit = AlgorithmsRegistry.get_max_cols_limit(\n self._ml_task, model_type\n )\n\n if max_rows_limit is not None:\n if self._data_info[\"rows\"] > max_rows_limit:\n return True\n if max_cols_limit is not None:\n if self._data_info[\"cols\"] > max_cols_limit:\n return True\n\n return False\n\n def default_params(self, models_cnt):\n\n generated_params = []\n for model_type in [\n \"LightGBM\",\n \"Xgboost\",\n \"CatBoost\",\n \"Neural Network\",\n \"Random Forest\",\n \"Extra Trees\",\n \"Nearest Neighbors\",\n ]:\n if model_type not in 
self._algorithms:\n continue\n\n if self.skip_if_rows_cols_limit(model_type):\n continue\n\n logger.info(f\"Get default parameters for {model_type} (#{models_cnt + 1})\")\n params = self._get_model_params(\n model_type, seed=models_cnt + 1, params_type=\"default\"\n )\n if params is None:\n continue\n special = \"Default_\" if self._optuna_time_budget is None else \"Optuna_\"\n params[\"name\"] = self.get_model_name(\n model_type, models_cnt + 1, special=special\n )\n params[\"status\"] = \"initialized\"\n params[\"final_loss\"] = None\n params[\"train_time\"] = None\n params[\"data_type\"] = \"original\"\n if self._optuna_time_budget is not None:\n params[\"optuna_time_budget\"] = self._optuna_time_budget\n params[\"optuna_init_params\"] = self._optuna_init_params\n\n unique_params_key = MljarTuner.get_params_key(params)\n if unique_params_key not in self._unique_params_keys:\n generated_params += [params]\n models_cnt += 1\n return generated_params\n\n def get_not_so_random_params(self, models_cnt):\n\n model_types = [\n \"Xgboost\",\n \"LightGBM\",\n \"CatBoost\",\n \"Random Forest\",\n \"Extra Trees\",\n \"Neural Network\",\n \"Nearest Neighbors\",\n ]\n\n generated_params = {m: [] for m in model_types}\n\n for model_type in model_types:\n if model_type not in self._algorithms:\n continue\n\n if self.skip_if_rows_cols_limit(model_type):\n continue\n # minus 1 because already have 1 default\n for i in range(self._start_random_models - 1):\n\n logger.info(\n f\"Generate not-so-random parameters for {model_type} (#{models_cnt+1})\"\n )\n params = self._get_model_params(model_type, seed=i + 1)\n if params is None:\n continue\n\n params[\"name\"] = self.get_model_name(model_type, models_cnt + 1)\n params[\"status\"] = \"initialized\"\n params[\"final_loss\"] = None\n params[\"train_time\"] = None\n params[\"data_type\"] = \"original\"\n if self._optuna_time_budget is not None:\n params[\"optuna_time_budget\"] = self._optuna_time_budget\n 
params[\"optuna_init_params\"] = self._optuna_init_params\n\n unique_params_key = MljarTuner.get_params_key(params)\n if unique_params_key not in self._unique_params_keys:\n generated_params[model_type] += [params]\n models_cnt += 1\n\n \"\"\"\n return_params = []\n for i in range(100):\n total = 0\n for m in [\"Xgboost\", \"LightGBM\", \"CatBoost\"]:\n if generated_params[m]:\n return_params += [generated_params[m].pop(0)]\n total += len(generated_params[m])\n if total == 0:\n break\n\n rest_params = []\n for m in [\n \"Random Forest\",\n \"Extra Trees\",\n \"Neural Network\",\n \"Nearest Neighbors\",\n ]:\n rest_params += generated_params[m]\n if rest_params:\n np.random.shuffle(rest_params)\n return_params += rest_params\n \"\"\"\n return_params = []\n for i in range(100):\n total = 0\n for m in [\n \"LightGBM\",\n \"Xgboost\",\n \"CatBoost\",\n \"Random Forest\",\n \"Extra Trees\",\n \"Neural Network\",\n \"Nearest Neighbors\",\n ]:\n if generated_params[m]:\n return_params += [generated_params[m].pop(0)]\n total += len(generated_params[m])\n if total == 0:\n break\n\n return return_params\n\n def get_hill_climbing_params(self, current_models):\n df_models, algorithms = self.df_models_algorithms(current_models)\n generated_params = []\n counts = {model_type: 0 for model_type in algorithms}\n\n for i in range(df_models.shape[0]):\n\n model_type = df_models[\"model_type\"].iloc[i]\n counts[model_type] += 1\n if counts[model_type] > self._top_models_to_improve:\n continue\n\n m = df_models[\"model\"].iloc[i]\n\n for p in HillClimbing.get(\n m.params.get(\"learner\"), self._ml_task, len(current_models) + self._seed\n ):\n\n model_indices = [\n int(m.get_name().split(\"_\")[0]) for m in current_models\n ]\n model_max_index = np.max(model_indices)\n\n logger.info(\n \"Hill climbing step, for model #{0}\".format(model_max_index + 1)\n )\n if p is not None:\n all_params = copy.deepcopy(m.params)\n all_params[\"learner\"] = p\n\n all_params[\"name\"] = 
self.get_model_name(\n all_params[\"learner\"][\"model_type\"],\n model_max_index + 1 + len(generated_params),\n )\n\n if \"golden_features\" in all_params[\"preprocessing\"]:\n all_params[\"name\"] += \"_GoldenFeatures\"\n if \"drop_features\" in all_params[\"preprocessing\"] and len(\n all_params[\"preprocessing\"][\"drop_features\"]\n ):\n all_params[\"name\"] += \"_SelectedFeatures\"\n all_params[\"status\"] = \"initialized\"\n all_params[\"final_loss\"] = None\n all_params[\"train_time\"] = None\n unique_params_key = MljarTuner.get_params_key(all_params)\n\n if unique_params_key not in self._unique_params_keys:\n generated_params += [all_params]\n\n return generated_params\n\n def get_all_int_categorical_strategy(self, current_models, total_time_limit):\n return self.get_categorical_strategy(\n current_models, PreprocessingTuner.CATEGORICALS_ALL_INT, total_time_limit\n )\n\n def get_mix_categorical_strategy(self, current_models, total_time_limit):\n return self.get_categorical_strategy(\n current_models, PreprocessingTuner.CATEGORICALS_MIX, total_time_limit\n )\n\n def get_loo_categorical_strategy(self, current_models, total_time_limit):\n return self.get_categorical_strategy(\n current_models, PreprocessingTuner.CATEGORICALS_LOO, total_time_limit\n )\n\n def get_categorical_strategy(self, current_models, strategy, total_time_limit):\n\n df_models, algorithms = self.df_models_algorithms(\n current_models, time_limit=0.1 * total_time_limit\n )\n generated_params = []\n for m_type in algorithms:\n # try to add categorical strategy only for below algorithms\n if m_type not in [\n \"Xgboost\",\n # \"LightGBM\", # use built-in categoricals (but need to int encode)\n # \"Neural Network\",\n # \"Random Forest\",\n # \"Extra Trees\",\n ]:\n continue\n models = df_models[df_models.model_type == m_type][\"model\"]\n\n for i in range(min(1, len(models))):\n m = models.iloc[i]\n\n params = copy.deepcopy(m.params)\n cols_preprocessing = 
params[\"preprocessing\"][\"columns_preprocessing\"]\n\n for col, preproc in params[\"preprocessing\"][\n \"columns_preprocessing\"\n ].items():\n new_preproc = []\n convert_categorical = False\n\n for p in preproc:\n if \"categorical\" not in p:\n new_preproc += [p]\n else:\n convert_categorical = True\n\n col_data_info = self._data_info[\"columns_info\"].get(col)\n few_categories = False\n if col_data_info is not None and \"few_categories\" in col_data_info:\n few_categories = True\n\n if convert_categorical:\n if strategy == PreprocessingTuner.CATEGORICALS_ALL_INT:\n new_preproc += [PreprocessingCategorical.CONVERT_INTEGER]\n elif strategy == PreprocessingTuner.CATEGORICALS_LOO:\n new_preproc += [PreprocessingCategorical.CONVERT_LOO]\n elif strategy == PreprocessingTuner.CATEGORICALS_MIX:\n if few_categories:\n new_preproc += [\n PreprocessingCategorical.CONVERT_ONE_HOT\n ]\n else:\n new_preproc += [\n PreprocessingCategorical.CONVERT_INTEGER\n ]\n\n cols_preprocessing[col] = new_preproc\n\n params[\"preprocessing\"][\"columns_preprocessing\"] = cols_preprocessing\n # if there is already a name of categorical strategy in the name\n # please remove it to avoid confusion (I hope!)\n for st in [\n PreprocessingTuner.CATEGORICALS_LOO,\n PreprocessingTuner.CATEGORICALS_ALL_INT,\n PreprocessingTuner.CATEGORICALS_MIX,\n ]:\n params[\"name\"] = params[\"name\"].replace(\"_\" + st, \"\")\n params[\"name\"] += f\"_{strategy}\"\n params[\"status\"] = \"initialized\"\n params[\"final_loss\"] = None\n params[\"train_time\"] = None\n params[\"data_type\"] = params.get(\"data_type\", \"\") + \"_\" + strategy\n if self._optuna_time_budget is not None:\n params[\"optuna_time_budget\"] = self._optuna_time_budget\n params[\"optuna_init_params\"] = self._optuna_init_params\n\n if \"model_architecture_json\" in params[\"learner\"]:\n del params[\"learner\"][\"model_architecture_json\"]\n unique_params_key = MljarTuner.get_params_key(params)\n if unique_params_key not in 
self._unique_params_keys:\n generated_params += [params]\n return generated_params\n\n def df_models_algorithms(\n self, current_models, time_limit=None, exclude_golden=False\n ):\n scores = [m.get_final_loss() for m in current_models]\n model_types = [m.get_type() for m in current_models]\n names = [m.get_name() for m in current_models]\n train_times = [m.get_train_time() for m in current_models]\n\n df_models = pd.DataFrame(\n {\n \"model\": current_models,\n \"score\": scores,\n \"model_type\": model_types,\n \"name\": names,\n \"train_time\": train_times,\n }\n )\n if time_limit is not None:\n df_models = df_models[df_models.train_time < time_limit]\n df_models.reset_index(drop=True, inplace=True)\n\n if exclude_golden:\n ii = df_models[\"name\"].apply(lambda x: \"GoldenFeatures\" in x)\n df_models = df_models[~ii]\n df_models.reset_index(drop=True, inplace=True)\n\n df_models.sort_values(by=\"score\", ascending=True, inplace=True)\n model_types = list(df_models.model_type)\n u, idx = np.unique(model_types, return_index=True)\n algorithms = u[np.argsort(idx)]\n\n return df_models, algorithms\n\n def get_golden_features_params(\n self, current_models, results_path, total_time_limit\n ):\n\n df_models, algorithms = self.df_models_algorithms(\n current_models, time_limit=0.1 * total_time_limit\n )\n\n generated_params = []\n for i in range(min(3, df_models.shape[0])):\n m = df_models[\"model\"].iloc[i]\n\n params = copy.deepcopy(m.params)\n params[\"preprocessing\"][\"golden_features\"] = {\n \"results_path\": results_path,\n \"ml_task\": self._ml_task,\n }\n params[\"name\"] += \"_GoldenFeatures\"\n params[\"status\"] = \"initialized\"\n params[\"final_loss\"] = None\n params[\"train_time\"] = None\n params[\"data_type\"] = params.get(\"data_type\", \"\") + \"_golden_features\"\n if self._optuna_time_budget is not None:\n params[\"optuna_time_budget\"] = self._optuna_time_budget\n params[\"optuna_init_params\"] = self._optuna_init_params\n\n if 
\"model_architecture_json\" in params[\"learner\"]:\n del params[\"learner\"][\"model_architecture_json\"]\n unique_params_key = MljarTuner.get_params_key(params)\n if unique_params_key not in self._unique_params_keys:\n generated_params += [params]\n return generated_params\n\n def get_kmeans_features_params(\n self, current_models, results_path, total_time_limit\n ):\n\n df_models, algorithms = self.df_models_algorithms(\n current_models, time_limit=0.1 * total_time_limit, exclude_golden=True\n )\n\n generated_params = []\n for i in range(min(3, df_models.shape[0])):\n m = df_models[\"model\"].iloc[i]\n\n params = copy.deepcopy(m.params)\n params[\"preprocessing\"][\"kmeans_features\"] = {\"results_path\": results_path}\n params[\"name\"] += \"_KMeansFeatures\"\n params[\"status\"] = \"initialized\"\n params[\"final_loss\"] = None\n params[\"train_time\"] = None\n params[\"data_type\"] = params.get(\"data_type\", \"\") + \"_kmeans_features\"\n if self._optuna_time_budget is not None:\n params[\"optuna_time_budget\"] = self._optuna_time_budget\n params[\"optuna_init_params\"] = self._optuna_init_params\n\n if \"model_architecture_json\" in params[\"learner\"]:\n del params[\"learner\"][\"model_architecture_json\"]\n unique_params_key = MljarTuner.get_params_key(params)\n if unique_params_key not in self._unique_params_keys:\n generated_params += [params]\n return generated_params\n\n def time_features_selection(self, current_models, total_time_limit):\n\n df_models, algorithms = self.df_models_algorithms(\n current_models, time_limit=0.1 * total_time_limit\n )\n\n time_needed = 0\n for m_type in algorithms:\n\n if m_type not in [\n \"Xgboost\",\n \"LightGBM\",\n \"CatBoost\",\n \"Neural Network\",\n \"Random Forest\",\n \"Extra Trees\",\n ]:\n continue\n models = df_models[df_models.model_type == m_type][\"model\"]\n\n for i in range(min(1, len(models))):\n m = models.iloc[i]\n if time_needed == 0:\n # best model will be used two times\n # one for insert random 
feature\n # one for selected features\n time_needed += 2.0 * m.get_train_time()\n else:\n time_needed += m.get_train_time()\n\n return time_needed\n\n def get_params_to_insert_random_feature(self, current_models, total_time_limit):\n\n time_needed = self.time_features_selection(current_models, total_time_limit)\n\n if time_needed > 0.1 * total_time_limit:\n print(\"Not enough time to perform features selection. Skip\")\n print(\n \"Time needed for features selection ~\", np.round(time_needed), \"seconds\"\n )\n print(\n f\"Please increase total_time_limit to at least ({int(np.round(10.0*time_needed))+60} seconds) to have features selection\"\n )\n return None\n\n df_models, algorithms = self.df_models_algorithms(\n current_models, time_limit=0.1 * total_time_limit\n )\n if df_models.shape[0] == 0:\n return None\n\n m = df_models.iloc[0][\"model\"]\n\n params = copy.deepcopy(m.params)\n params[\"preprocessing\"][\"add_random_feature\"] = True\n params[\"name\"] += \"_RandomFeature\"\n params[\"status\"] = \"initialized\"\n params[\"final_loss\"] = None\n params[\"train_time\"] = None\n params[\"explain_level\"] = 1\n if \"model_architecture_json\" in params[\"learner\"]:\n del params[\"learner\"][\"model_architecture_json\"]\n if self._optuna_time_budget is not None:\n # dont tune algorithm with random feature inserted\n # algorithm will be tuned after feature selection\n params[\"optuna_time_budget\"] = None\n params[\"optuna_init_params\"] = {}\n\n unique_params_key = MljarTuner.get_params_key(params)\n if unique_params_key not in self._unique_params_keys:\n return [params]\n return None\n\n def get_features_selection_params(\n self, current_models, results_path, total_time_limit\n ):\n\n fname = os.path.join(results_path, \"drop_features.json\")\n if not os.path.exists(fname):\n return None\n\n drop_features = json.load(open(fname, \"r\"))\n print(\"Drop features\", drop_features)\n\n # in case of droping only one feature (random_feature)\n # skip this step\n if 
len(drop_features) <= 1:\n return None\n\n df_models, algorithms = self.df_models_algorithms(\n current_models, time_limit=0.1 * total_time_limit\n )\n\n generated_params = []\n for m_type in algorithms:\n # try to do features selection only for below algorithms\n if m_type not in [\n \"Xgboost\",\n \"LightGBM\",\n \"CatBoost\",\n \"Neural Network\",\n \"Random Forest\",\n \"Extra Trees\",\n ]:\n continue\n models = df_models[df_models.model_type == m_type][\"model\"]\n\n for i in range(min(1, len(models))):\n m = models.iloc[i]\n\n params = copy.deepcopy(m.params)\n params[\"preprocessing\"][\"drop_features\"] = drop_features\n params[\"name\"] += \"_SelectedFeatures\"\n params[\"status\"] = \"initialized\"\n params[\"final_loss\"] = None\n params[\"train_time\"] = None\n params[\"data_type\"] = (\n params.get(\"data_type\", \"\") + \"_features_selection\"\n )\n if self._optuna_time_budget is not None:\n params[\"optuna_time_budget\"] = self._optuna_time_budget\n params[\"optuna_init_params\"] = self._optuna_init_params\n\n if \"model_architecture_json\" in params[\"learner\"]:\n del params[\"learner\"][\"model_architecture_json\"]\n unique_params_key = MljarTuner.get_params_key(params)\n if unique_params_key not in self._unique_params_keys:\n generated_params += [params]\n return generated_params\n\n def _get_model_params(self, model_type, seed, params_type=\"random\"):\n model_info = AlgorithmsRegistry.registry[self._ml_task][model_type]\n\n model_params = None\n if params_type == \"default\":\n\n model_params = model_info[\"default_params\"]\n model_params[\"seed\"] = seed\n\n else:\n model_params = RandomParameters.get(model_info[\"params\"], seed + self._seed)\n if model_params is None:\n return None\n\n required_preprocessing = model_info[\"required_preprocessing\"]\n model_additional = model_info[\"additional\"]\n preprocessing_params = PreprocessingTuner.get(\n required_preprocessing, self._data_info, self._ml_task\n )\n\n model_params = {\n 
\"additional\": model_additional,\n \"preprocessing\": preprocessing_params,\n \"validation_strategy\": self._validation_strategy,\n \"learner\": {\n \"model_type\": model_info[\"class\"].algorithm_short_name,\n \"ml_task\": self._ml_task,\n \"n_jobs\": self._n_jobs,\n **model_params,\n },\n }\n\n if self._data_info.get(\"num_class\") is not None:\n model_params[\"learner\"][\"num_class\"] = self._data_info.get(\"num_class\")\n\n model_params[\"ml_task\"] = self._ml_task\n model_params[\"explain_level\"] = self._explain_level\n\n return model_params\n\n @staticmethod\n def get_params_key(params):\n key = \"key_\"\n for main_key in [\"preprocessing\", \"learner\", \"validation_strategy\"]:\n key += \"_\" + main_key\n for k in sorted(params[main_key]):\n if k in [\"seed\", \"explain_level\"]:\n continue\n key += \"_{}_{}\".format(k, params[main_key][k])\n return key\n\n def add_key(self, model):\n if model.get_type() != \"Ensemble\":\n key = MljarTuner.get_params_key(model.params)\n self._unique_params_keys += [key]\n\n def boost_params(self, current_models, results_path, total_time_limit):\n\n df_models, algorithms = self.df_models_algorithms(\n current_models, time_limit=0.1 * total_time_limit\n )\n best_model = None\n for i in range(df_models.shape[0]):\n if df_models[\"model_type\"].iloc[i] in [\n \"Ensemble\",\n \"Neural Network\",\n \"Nearest Neighbors\",\n ]:\n continue\n if \"RandomFeature\" in df_models[\"model\"].iloc[i].get_name():\n continue\n best_model = df_models[\"model\"].iloc[i]\n break\n if best_model is None:\n return []\n\n # load predictions\n oof = best_model.get_out_of_folds()\n\n predictions = oof[[c for c in oof.columns if c.startswith(\"prediction\")]]\n y = oof[\"target\"]\n\n if self._ml_task == MULTICLASS_CLASSIFICATION:\n oh = OneHotEncoder(sparse=False)\n y_encoded = oh.fit_transform(np.array(y).reshape(-1, 1))\n residua = np.sum(\n np.abs(np.array(y_encoded) - np.array(predictions)), axis=1\n )\n else:\n residua = np.abs(np.array(y) - 
np.array(predictions).ravel())\n\n df_preds = pd.DataFrame(\n {\"res\": residua, \"lp\": range(residua.shape[0]), \"target\": np.array(y)}\n )\n\n df_preds = df_preds.sort_values(by=\"res\", ascending=True)\n df_preds[\"order\"] = range(residua.shape[0])\n df_preds[\"order\"] = (df_preds[\"order\"]) / residua.shape[0] / 5.0 + 0.9\n df_preds = df_preds.sort_values(by=\"lp\", ascending=True)\n\n sample_weight_path = os.path.join(\n results_path, best_model.get_name() + \"_sample_weight.parquet\"\n )\n pd.DataFrame({\"sample_weight\": df_preds[\"order\"]}).to_parquet(\n sample_weight_path, index=False\n )\n\n generated_params = []\n\n params = copy.deepcopy(best_model.params)\n\n params[\"validation_strategy\"][\"sample_weight_path\"] = sample_weight_path\n params[\"injected_sample_weight\"] = True\n params[\"name\"] += \"_BoostOnErrors\"\n params[\"status\"] = \"initialized\"\n params[\"final_loss\"] = None\n params[\"train_time\"] = None\n params[\"data_type\"] = \"boost_on_error\"\n if \"model_architecture_json\" in params[\"learner\"]:\n del params[\"learner\"][\"model_architecture_json\"]\n if self._optuna_time_budget is not None:\n params[\"optuna_time_budget\"] = self._optuna_time_budget\n params[\"optuna_init_params\"] = self._optuna_init_params\n unique_params_key = MljarTuner.get_params_key(params)\n\n if unique_params_key not in self._unique_params_keys:\n generated_params += [params]\n\n return generated_params\n" ]
[ [ "numpy.unique", "sklearn.preprocessing.OneHotEncoder", "pandas.DataFrame", "numpy.round", "numpy.max", "numpy.argsort", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ParisNeo/pygameui
[ "685d8b09fe95901821db3240a512c7e8b5bdfe02" ]
[ "OOPyGame/__init__.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"=== Face Analyzer Helpers =>\n Module : ui\n Author : Saifeddine ALOUI (ParisNeo)\n Licence : MIT\n Description :\n User interface helpers\n<================\"\"\"\nimport time\nimport pygame\nimport cssutils\nfrom FaceAnalyzer.helpers.geometry.euclidian import is_point_inside_rect\nfrom FaceAnalyzer.helpers.ui.pygame.colors import get_color\n# Widgets\nfrom dataclasses import dataclass\nfrom urllib.request import urlopen\n\nimport numpy as np\nimport io\n\n# Initialize font\npygame.font.init()\n\n# Define orientations\nHorizontal = 0\nVertical = 1\n\n@dataclass\nclass WidgetStyle:\n \"\"\"Class for keeping track widget styling information.\n font: The font to use for the text.\n bg_color: The background color.\n border_color: The color of the border.\n text_color: The color of the text.\n border_size: The size of the border.\n font_name: The name of the font.\n font_size: The size of the font.\n left_margin: The margin on the x-axis.\n right_margin: The margin on the y-axis.\n width: The width of the widget.\n height: The height of the widget.\n align: The alignment of the text.\n img: The image to use for the widget. 
\n \n \"\"\"\n font : pygame.font.Font = pygame.font.Font('freesansbold.ttf', 14)\n bg_color: tuple = (100,100,100)\n border_color: tuple =(0,0,0)\n border_radius: float = 0\n text_color: tuple = (0,0,0)\n border_size: int = 0\n font_name: str = 'freesansbold'\n font_size: int = 24\n left_margin: int = 0\n right_margin: int = 0\n width: int = None\n height: int = None\n align:str = 'center'\n img:str = None\n\n\n# =============================================== Widget ==========================================\n\nclass Widget():\n def __init__(\n self,\n parent=None,\n rect:tuple=None, \n style:str=\"widget{background-color:#a9a9a9;}\\n\",\n extra_styles={}\n ):\n \"\"\"\n Creates a new Widget instance.\n\nrect: \nwithin the window.\nstyle:\nextra_styles: A dictionary of additional CSS style properties\nto be applied to the widget.\n Args:\n rect (tuple, optional): A tuple of four numbers, representing the position of the widget. Defaults to [0,0,100,50].\n style (str, optional): A string containing the CSS style properties for the widget. Defaults to \"widget{background-color:#a9a9a9;}\\n\".\n extra_styles (dict, optional): [description]. 
Defaults to {}.\n \"\"\"\n\n\n self.parent = parent\n self.visible = True\n self.styles=self.merge_two_dicts({\n \"widget\":WidgetStyle()\n }, extra_styles)\n self.setStyleSheet(style)\n if rect is not None:\n self.setRect(rect)\n else:\n self.rect = None\n self.rect_left_top_right_bottom = None \n\n def setParent(self, parent):\n self.parent = parent\n if self.parent is not None and self.rect is None:\n self.setRect(self.parent.rect)\n\n def setPosition(self, pos:list):\n \"\"\"Sets the position of the rectangle.\n\n Args:\n pos (list): The position of the rectangle.\n\n \"\"\"\n self.rect[0]=pos[0]\n self.rect[1]=pos[1]\n self.setRect(self.rect)\n\n def setSize(self, size):\n \"\"\"This function sets the size of the widget.\n\n Args:\n size (list): The size is given as a tuple of two numbers, the first representing the width and the second representing the height.\n \"\"\"\n self.rect[2]=size[0]\n self.rect[3]=size[1]\n self.setRect(self.rect)\n\n def setRect(self, rect):\n \"\"\"Creates a new Rectangle object.\n\n Args:\n rect (list): the rectangle's coordinates\n \"\"\"\n self.rect = rect\n self.rect_left_top_right_bottom = (rect[0],rect[1],rect[0]+rect[2],rect[1]+rect[3])\n\n def merge_two_dicts(self, x, y):\n \"\"\"\n Merges two dicts, x and y, into a new dict, z. x and y must have the same\n keys. 
z is created as a copy of x, and then z's keys and values are updated\n with those of y.\n\n Args:\n x ([type]): [description]\n y ([type]): [description]\n\n Returns:\n [type]: [description]\n \"\"\"\n z = x.copy() # start with keys and values of x\n z.update(y) # modifies z with keys and values of y\n return z\n\n\n def draw_rect(self, screen, style: WidgetStyle, rect:tuple=None):\n if rect is None:\n rect = self.rect\n if style.bg_color is not None:\n pygame.draw.rect(screen,style.bg_color,rect, border_radius = style.border_radius)\n if style.border_size>0:\n pygame.draw.rect(screen,style.border_color,rect, style.border_size, border_radius = style.border_radius)\n\n def blit_text(self, text, style:WidgetStyle, screen, rect:tuple=None):\n \"\"\"Blits button text using a css style\n\n Args:\n style (WidgetStyle): The style to be used\n screen ([type]): The screen on which to blit\n \"\"\"\n if rect is None:\n rect = self.rect\n text_render = style.font.render(text,True, style.text_color)\n if style.align =='center':\n screen.blit(text_render,(rect[0]+rect[2]//2-text_render.get_width()//2,rect[1]+rect[3]//2-text_render.get_height()//2)) \n elif style.align =='left':\n screen.blit(text_render,(rect[0]+style.left_margin,rect[1]+rect[3]//2-text_render.get_height()//2)) \n elif style.align =='right':\n screen.blit(text_render,(rect[0]+rect[2]-text_render.get_width(),rect[1]+rect[3]//2-text_render.get_height()//2)) \n return text_render.get_width()\n\n def setStyleSheet(self, style:str):\n \"\"\"Sets the button stylesheet\n\n Args:\n style (str): A css stylesheet to specify the button caracteristics\n \"\"\"\n self.style = cssutils.parseString(style)\n \n for rule in self.style:\n if rule.type == rule.STYLE_RULE:\n try:\n style = self.styles[rule.selectorText]\n except:\n continue\n # find property\n for property in rule.style:\n if property.name == 'width':\n v = property.value\n if v is not None:\n style.width = int(v)\n\n if property.name == 'height':\n v = 
property.value\n if v is not None:\n style.height = int(v)\n\n if property.name == 'color':\n v = get_color(property.value)\n if v is not None:\n style.text_color = v\n \n \n if property.name == 'border-size':\n style.border_size = int(property.value)\n if property.name == 'border-radius':\n style.border_radius = int(property.value)\n \n \n if property.name == 'background-image':\n bgi = property.value.strip()\n if bgi.startswith(\"url\"):\n image_url = bgi[4:-1]\n print(image_url)\n image_str = urlopen(image_url).read()\n # create a file object (stream)\n image_file = io.BytesIO(image_str)\n image = pygame.image.load(image_file)\n if image is not None:\n style.img = image\n if property.name == 'background-color':\n style.bg_color = get_color(property.value)\n\n # Text stuff\n if property.name=='left-margin':\n style.left_margin = int(property.value)\n if property.name=='right-margin':\n style.right_margin = int(property.value)\n if property.name=='align':\n style.align = property.value\n if property.name == 'font-size':\n style.font_size=property.value\n style.font = pygame.font.Font(style.font_name+'.ttf', style.font_size)\n if property.name == 'font-name':\n style.font_name=property.value\n style.font = pygame.font.Font(style.font_name+'.ttf', style.font_size)\n\n def paint(self, screen):\n \"\"\"Paints the button\n\n Args:\n screen ([type]): The screen on which to blit\n \"\"\"\n \n style = self.styles[\"widget\"]\n if style.img is None:\n if style.bg_color is not None:\n self.draw_rect(screen, style)\n else:\n screen.blit(pygame.transform.scale(style.img, (self.rect[2], self.rect[3])), (self.rect[0],self.rect[1]))\n\n def handle_events(self, events):\n pass\n\nclass Layout(Widget):\n def __init__(self, parent=None, rect: tuple = None, style: str = \"widget{background-color:#a9a9a9;}\\n\", extra_styles={}):\n super().__init__(parent, rect, style, extra_styles)\n self.parent = parent\n self.widgets=[]\n def addWidget(self, widget:Widget):\n 
self.widgets.append(widget)\n\nclass HorizontalLayout(Layout):\n def __init__(self, parent=None, rect: tuple = None, style: str = \"widget{background-color:#a9a9a9;}\\n\", extra_styles={}):\n super().__init__(parent, rect, style, extra_styles)\n\n def addWidget(self, widget:Widget, percent=None):\n self.widgets.append([percent, widget])\n widget.parent = self\n\n def paint(self, screen):\n l = len(self.widgets)\n if self.rect is None:\n x = self.parent.rect[0]\n y = self.parent.rect[1]\n w = self.parent.rect[2]\n h = self.parent.rect[3]\n else:\n x = self.rect[0]\n y = self.rect[1]\n w = self.rect[2]\n h = self.rect[3]\n for percent, widget in self.widgets:\n if percent is None:\n percent=1/l \n widget.setRect([x,y,int(w*percent),h])\n x += int(w*percent)\n widget.paint(screen)\n\n def handle_events(self, events):\n for percent, widget in self.widgets:\n widget.handle_events(events)\n\n\nclass VerticalLayout(Layout):\n def __init__(\n self, \n parent=None, \n rect: tuple = None, \n style: str = \"\", \n extra_styles={}\n ):\n super().__init__(parent, rect, style, extra_styles)\n\n def addWidget(self, widget:Widget, percent=None):\n self.widgets.append([percent, widget])\n widget.parent = self\n\n def paint(self, screen):\n l = len(self.widgets)\n if self.rect is None:\n x = self.parent.rect[0]\n y = self.parent.rect[1]\n w = self.parent.rect[2]\n h = self.parent.rect[3]\n else:\n x = self.rect[0]\n y = self.rect[1]\n w = self.rect[2]\n h = self.rect[3]\n for percent, widget in self.widgets:\n if percent is None:\n percent=1/l \n widget.setRect([x,y,w,int(h*percent)])\n y += int(h*percent)\n widget.paint(screen)\n\n def handle_events(self, events):\n for percent, widget in self.widgets:\n widget.handle_events(events)\n\nclass FormLayout(Layout):\n def __init__(self, parent=None, rect: tuple = None, default_title_align:str=\"left\", style: str = \"\", form_ratio=0.5, fixed_title_size:int=None, extra_styles={}):\n self.form_ratio = form_ratio\n 
self.default_title_align = default_title_align\n self.fixed_title_size = fixed_title_size\n super().__init__(parent, rect, style, extra_styles)\n\n def addWidget(self, widget:Widget, title=\"\"):\n self.widgets.append([Label(title, align=self.default_title_align), widget])\n widget.parent = self\n\n def paint(self, screen):\n l = len(self.widgets)\n if self.rect is None:\n x = self.parent.rect[0]\n y = self.parent.rect[1]\n w = self.parent.rect[2]\n h = self.parent.rect[3]\n else:\n x = self.rect[0]\n y = self.rect[1]\n w = self.rect[2]\n h = self.rect[3]\n\n for title, widget in self.widgets:\n if self.fixed_title_size is None:\n title_width = int(w*(1-self.form_ratio))\n else:\n title_width = self.fixed_title_size\n title.setRect([x,y,title_width,widget.rect[3]]) \n widget.setRect([x+title_width,y,w-title_width,widget.rect[3]])\n y += int(widget.rect[3])\n title.paint(screen)\n widget.paint(screen)\n\n def handle_events(self, events):\n for percent, widget in self.widgets:\n widget.handle_events(events)\n# =============================================== Timer ==========================================\nclass Timer():\n def __init__(self, callback_fn, intrval_s:float=0.1) -> None:\n self.callback_fn = callback_fn\n self.intrval_s = intrval_s\n self.started = False\n def start(self):\n self.started = True\n self.last_time = time.time()\n def stop(self):\n self.started = False \n def process(self):\n dt = time.time() - self.last_time\n if dt>=self.intrval_s:\n if self.callback_fn is not None:\n self.callback_fn()\n self.last_time = time.time()\n\n# =============================================== Window Manager ==========================================\n\nclass WindowManager():\n def __init__(self, window_title:str=\"\", resolution:tuple=(800,600), is_rezisable:bool=True):\n \"\"\"Builds a window managaer object\n \"\"\"\n if resolution is not None:\n if is_rezisable:\n self.screen = pygame.display.set_mode(resolution, pygame.RESIZABLE)\n else:\n self.screen = 
pygame.display.set_mode(resolution)\n else:\n self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n pygame.display.set_caption(window_title)\n self.widgets = []\n self.events = None\n self.Running = True\n self.timers=[]\n self.menu = None\n self.update_rect()\n\n\n def build_menu_bar(self):\n self.menu = MenuBar(self)\n return self.menu\n\n def build_timer(self, callback_fn, intrval_ms:int=100):\n timer = Timer(callback_fn, intrval_ms)\n self.timers.append(timer)\n return timer\n\n def add_timer(self, timer:Timer):\n self.timers.append(timer)\n return timer\n\n def update_rect(self):\n w, h = pygame.display.get_surface().get_size()\n if self.menu is not None:\n self.rect = [0,self.menu.height,w,h]\n else:\n self.rect = [0,0,w,h]\n\n def addWidget(self, widget:Widget):\n \"\"\"Adds a new widget to the widgets list\n\n Args:\n widget (Widget): The widget to be added\n \"\"\"\n self.widgets.append(widget)\n widget.parent = self\n \n def process(self, background_color:tuple = (0,0,0)):\n self.screen.fill(background_color)\n self.events = pygame.event.get()\n for event in self.events:\n if event.type == pygame.VIDEORESIZE:\n self.update_rect()\n\n for widget in self.widgets:\n if widget.visible:\n widget.paint(self.screen)\n widget.handle_events(self.events)\n\n if self.menu is not None:\n self.menu.paint(self.screen)\n self.menu.handle_events(self.events)\n # Update UI\n pygame.display.update()\n # Check timerds\n for timer in self.timers:\n timer.process()\n\n def loop(self):\n \"\"\"[summary]\n \"\"\"\n # Main loop\n while self.Running:\n self.process()\n\n for event in self.events:\n if event.type == pygame.QUIT:\n print(\"Done\")\n self.Running=False\n # Update UI\n pygame.display.update()\n\nclass Sprite(Widget):\n def __init__(\n self,\n image_path:str, \n parent=None,\n rect:tuple=[0,0,800,600], \n clicked_event_handler=None\n ):\n Widget.__init__(self,parent,rect, style=\n\"\"\"\n widget{\n\"\"\"\n+\n f\"\"\"\n 
background-image:url('file:///{image_path}')\n \"\"\"\n+\n\"\"\"\n }\n\"\"\",extra_styles={\"label\":WidgetStyle(align=\"left\")})\n\n\n\n# =============================================== ImageBox ==========================================\n\nclass ImageBox(Widget):\n def __init__(\n self,\n image:np.ndarray=None, \n parent=None,\n rect:tuple=[0,0,800,600], \n style:str=\"btn.normal{color:white; background-color:#878787;}\\nbtn.hover{color:white; background-color:#a9a9a9};\\nbtn.pressed{color:red; background-color:#565656};\",\n clicked_event_handler=None,\n color_key=None,\n alpha=100\n ):\n Widget.__init__(self,parent,rect, style,extra_styles={\"label\":WidgetStyle(align=\"left\")})\n self.color_key = color_key\n self.alpha = alpha\n if image is not None:\n self.setImage(image)\n else:\n self.surface = None\n\n def setImage(self, image:np.ndarray):\n self.surface = pygame.pixelcopy.make_surface(np.swapaxes(image,0,1).astype(np.uint8))\n if self.color_key is not None:\n self.surface.set_colorkey(self.color_key)\n if self.alpha<100:\n self.surface.set_alpha(self.alpha)\n self.surface = pygame.transform.scale(self.surface, (self.rect[2], self.rect[3]))\n\n def paint(self, screen):\n if self.surface is not None:\n screen.blit(pygame.transform.scale(self.surface, (self.rect[2], self.rect[3])),(self.rect[0],self.rect[1]))\n\n# =============================================== Label ==========================================\n\nclass Label(Widget):\n def __init__(\n self,\n text, \n parent=None,\n rect:tuple=[0,0,100,50], \n align:str=\"left\",\n left_margin:int=0,\n right_margin:int=0,\n style:str=\"\",\n clicked_event_handler=None\n ):\n Widget.__init__(self, parent, rect, style,extra_styles={\"label\":WidgetStyle(align=align,left_margin=left_margin,right_margin=right_margin)})\n self.text = text\n self.hovered=False\n self.pressed=False\n self.clicked_event_handler = clicked_event_handler\n self.setStyleSheet(style)\n\n def setText(self,text:str)->None:\n \"\"\"Changes 
the text to be displayed inside the label\n\n Args:\n text (str): The text to be displayed\n \"\"\"\n self.text = text\n\n\n def paint(self, screen):\n \"\"\"Paints the button\n\n Args:\n screen ([type]): The screen on which to blit\n \"\"\"\n style = self.styles[\"label\"]\n if style.img is None:\n self.draw_rect(screen, style)\n else:\n screen.blit(pygame.transform.scale(style.img, (self.rect[2], self.rect[3])), (self.rect[0],self.rect[1]))\n\n self.blit_text(self.text,style, screen)\n\n\n# =============================================== Label ==========================================\n\nclass TextBox(Widget):\n def __init__(\n self,\n text, \n parent=None,\n rect:tuple=[0,0,100,50], \n align:str=\"left\",\n left_margin:int=5,\n right_margin:int=0, \n style:str=\"\",\n clicked_event_handler=None,\n lost_focus_event_handler=None\n ):\n Widget.__init__(self, parent, rect, style,extra_styles={\"textbox\":WidgetStyle(align=align,left_margin=left_margin,right_margin=right_margin,border_size=1, bg_color=get_color(\"white\"))})\n self.text = text\n self.hovered=False\n self.pressed=False\n self.focused=False\n self.cursorPos=len(text)\n self.clicked_event_handler = clicked_event_handler\n self.lost_focus_event_handler = lost_focus_event_handler\n self.setStyleSheet(style)\n\n def setText(self,text:str)->None:\n \"\"\"Changes the text to be displayed inside the label\n\n Args:\n text (str): The text to be displayed\n \"\"\"\n self.text = text\n\n def handle_events(self, events):\n \"\"\"Handles the events\n\n \"\"\"\n for event in events:\n if event.type == pygame.MOUSEMOTION:\n self.hovered = is_point_inside_rect(event.pos,self.rect_left_top_right_bottom)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n self.hovered = is_point_inside_rect(event.pos,self.rect_left_top_right_bottom)\n if self.hovered == True:\n self.focused=True\n if self.clicked_event_handler is not None:\n self.clicked_event_handler()\n style = self.styles[\"textbox\"]\n if len(self.text)>0:\n sz = 
style.font.size(self.text[0])[0]\n self.cursorPos = min(int((event.pos[0] - self.rect[0] - style.left_margin)/sz), len(self.text))\n else:\n self.cursorPos = 0\n else:\n self.focused=False\n\n if self.lost_focus_event_handler is not None:\n self.lost_focus_event_handler()\n elif event.type == pygame.KEYDOWN:\n if self.focused:\n if event.key == pygame.K_RETURN:\n pass\n elif event.key == pygame.K_BACKSPACE:\n if len(self.text)==self.cursorPos:\n self.text = self.text[:-1]\n self.cursorPos-=1\n else:\n self.text = self.text[:self.cursorPos-1]+self.text[self.cursorPos:]\n self.cursorPos-=1\n elif event.key == pygame.K_DELETE:\n if len(self.text)==self.cursorPos:\n pass\n else:\n self.text = self.text[:self.cursorPos]+self.text[self.cursorPos+1:]\n\n else:\n if len(self.text)==self.cursorPos:\n self.text += event.unicode\n self.cursorPos+=1\n else:\n self.text = self.text[:self.cursorPos]+event.unicode+self.text[self.cursorPos:]\n self.cursorPos+=1\n\n elif event.type == pygame.MOUSEBUTTONUP:\n pass\n def paint(self, screen):\n \"\"\"Paints the button\n\n Args:\n screen ([type]): The screen on which to blit\n \"\"\"\n style = self.styles[\"textbox\"]\n if style.img is None:\n self.draw_rect(screen, style)\n else:\n screen.blit(pygame.transform.scale(style.img, (self.rect[2], self.rect[3])), (self.rect[0],self.rect[1]))\n\n self.blit_text(self.text,style, screen)\n\n # Blit the cursor\n if self.focused:\n x = style.font.size(self.text[0:self.cursorPos])[0]\n pygame.draw.line(screen,style.text_color,(self.rect[0]+x,self.rect[1]+5),(self.rect[0]+x,self.rect[1]+self.rect[3]-5),2)\n \n# =============================================== Button ==========================================\n\nclass Button(Widget):\n def __init__(\n self,\n text,\n parent=None,\n rect:tuple=[0,0,100,50], \n style:str=\"\",\n extra_styles:dict={},\n is_toggle=False,\n clicked_event_handler=None,\n lost_focus_event_handler=None\n ):\n Widget.__init__(\n self,\n parent,\n rect,\n style,\n 
self.merge_two_dicts({\n \"btn.normal\":WidgetStyle(border_radius=4,text_color=(255,255,255), bg_color=get_color(\"#878787\")),\n \"btn.hover\":WidgetStyle(border_radius=4,text_color=(255,255,255), bg_color=get_color(\"#a9a9a9\")),\n \"btn.pressed\":WidgetStyle(border_radius=4,text_color=(255,255,255), bg_color=get_color(\"#565656\")),\n }, extra_styles)\n )\n\n \n \n \n self.text = text\n self.is_toggle = is_toggle\n self.hovered=False\n self.pressed=False\n self.toggled=False\n self.clicked_event_handler = clicked_event_handler\n self.lost_focus_event_handler = lost_focus_event_handler\n\n\n def setText(self,text:str)->None:\n \"\"\"Changes the text to be displayed inside the label\n\n Args:\n text (str): The text to be displayed\n \"\"\"\n self.text = text\n\n def paint(self, screen):\n \"\"\"Paints the button\n\n Args:\n screen ([type]): The screen on which to blit\n \"\"\"\n \n if self.pressed:\n style = self.styles[\"btn.pressed\"]\n elif self.hovered:\n style = self.styles[\"btn.hover\"]\n else:\n style = self.styles[\"btn.normal\"]\n\n if style.img is None:\n self.draw_rect(screen, style)\n else:\n screen.blit(pygame.transform.scale(style.img, (self.rect[2], self.rect[3])), (self.rect[0],self.rect[1]))\n self.blit_text(self.text, style, screen)\n\n def handle_events(self, events):\n \"\"\"Handles the events\n\n \"\"\"\n for event in events:\n if event.type == pygame.MOUSEMOTION:\n self.hovered = is_point_inside_rect(event.pos,self.rect_left_top_right_bottom)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n self.hovered = is_point_inside_rect(event.pos,self.rect_left_top_right_bottom)\n if self.hovered == True:\n if self.is_toggle:\n if not self.toggled:\n self.pressed=not self.pressed\n self.toggled=True\n else:\n self.pressed=True\n if self.clicked_event_handler is not None:\n self.clicked_event_handler()\n else:\n if self.lost_focus_event_handler is not None:\n self.lost_focus_event_handler()\n \n\n elif event.type == pygame.MOUSEBUTTONUP:\n if not 
self.is_toggle:\n self.pressed=False\n self.toggled=False\n\n\n\n\n# =============================================== ProgressBar ==========================================\n\nclass ProgressBar(Widget):\n def __init__(\n self, \n parent=None,\n rect: tuple = [0, 0, 100, 50], \n style: str = \"brogressbar.outer{background-color:#ffffff;}\\nbrogressbar.inner{background-color:#ffffff;}\", \n value=0\n ):\n \"\"\"Builds a progressbar widget\n\n Args:\n rect (tuple, optional): Rectangle where to put the progressbar. Defaults to [0, 0, 100, 50].\n style (str, optional): [description]. Defaults to \"brogressbar.outer{background-color:#ffffff;}\\nbrogressbar.inner{background-color:#ffffff;}\".\n value (int, optional): [description]. Defaults to 0.\n \"\"\"\n super().__init__(\n parent,\n rect=rect, \n style=style, \n extra_styles={\n \"brogressbar.outer\":WidgetStyle(),\n \"brogressbar.inner\":WidgetStyle(),\n }\n )\n self.value=value\n\n def setValue(self, value):\n self.value = value \n\n def paint(self, screen):\n \"\"\"Paints the button\n\n Args:\n screen ([type]): The screen on which to blit\n \"\"\"\n \n outer_style = self.styles[\"brogressbar.outer\"]\n inner_style = self.styles[\"brogressbar.inner\"]\n if outer_style.img is None:\n self.draw_rect(screen, outer_style)\n else:\n screen.blit(pygame.transform.scale(outer_style.img, (self.rect[2], self.rect[3])), (self.rect[0],self.rect[1]))\n \n if inner_style.img is None:\n self.draw_rect(screen, inner_style)\n if inner_style.bg_color is not None:\n pygame.draw.rect(screen,inner_style.bg_color,[self.rect[0], self.rect[1], self.rect[2]*self.value, self.rect[3]])\n if inner_style.border_size>0:\n pygame.draw.rect(screen,inner_style.border_color,[self.rect[0], self.rect[1], self.rect[2]*self.value, self.rect[3]], inner_style.border_size)\n\n else:\n screen.blit(pygame.transform.scale(inner_style.img, (self.rect[2], self.rect[3])), (self.rect[0],self.rect[1]))\n \n\n\nclass Slider(Widget):\n def __init__(\n self, \n 
parent=None,\n rect: tuple = [0, 0, 100, 50], \n style: str = \"\", \n value=0,\n orientation=Horizontal,\n valueChanged_callback=None,\n mouse_down_callback=None\n ):\n \"\"\"Creates a Slider instance.\n\n Args:\n rect (tuple, optional): Rectangle where to put the progressbar. Defaults to [0, 0, 100, 50].\n style (str, optional): The style of the Slider. Defaults to \"\".\n value (int, optional): The initial value of the Slider.. Defaults to 0.\n orientation : The orientation of the Slider.\n\n valueChanged_callback : A callback function that is called when the value of the slider changes\n \"\"\"\n self.value = 0\n self.orientation = orientation\n self.hovered=False\n self.selector_hovered=False\n self.pressed=False\n\n if self.orientation == Horizontal:\n super().__init__(\n parent,\n rect=rect, \n style=style, \n extra_styles={\n \"slider.bar\":WidgetStyle(border_size=3,border_radius=3, text_color=get_color(\"white\"),bg_color=get_color(\"#878787\"), height=10),\n \"slider.selector.normal\":WidgetStyle(border_size=1,border_radius=15, text_color=get_color(\"white\"),bg_color=get_color(\"#a7a7a7\"), width=15, height=15),\n \"slider.selector.hover\":WidgetStyle(border_size=1,border_radius=15, text_color=get_color(\"white\"),bg_color=get_color(\"#c6c6c6\"), width=20, height=20),\n \"slider.selector.pressed\":WidgetStyle(border_size=1,border_radius=15, text_color=get_color(\"white\"),bg_color=get_color(\"#565656\"), width=15, height=15),\n }\n )\n else:\n super().__init__(\n parent,\n rect=rect, \n style=style, \n extra_styles={\n \"slider.bar\":WidgetStyle(border_size=3,border_radius=3, text_color=get_color(\"white\"),bg_color=get_color(\"#878787\"), width=10),\n \"slider.selector.normal\":WidgetStyle(border_size=1,border_radius=15, text_color=get_color(\"white\"),bg_color=get_color(\"#a7a7a7\"), width=15, height=15),\n \"slider.selector.hover\":WidgetStyle(border_size=1,border_radius=15, text_color=get_color(\"white\"),bg_color=get_color(\"#c6c6c6\"), width=20, 
height=20),\n \"slider.selector.pressed\":WidgetStyle(border_size=1,border_radius=15, text_color=get_color(\"white\"),bg_color=get_color(\"#565656\"), width=15, height=15),\n }\n )\n self.is_toggle = False\n self.setValue(value)\n self.valueChanged_callback = valueChanged_callback\n self.mouse_down_callback = mouse_down_callback\n\n def setValue(self, value:float):\n \"\"\"Sets the current value of the slider between 0 and 1\n\n Args:\n value (float): The value between 0 and 1\n \"\"\"\n self.value = value \n self.updateUIRects()\n\n def updateUIRects(self):\n \"\"\"This method updates the rectangles of selector and bar based on the current\n orientation and style of the slider.\n\n The orientation can be Horizontal or Vertical, and the style can be\n Slider.bar or Slider.selector.hover.\n\n If the selector is hovered, the style will be Slider.selector.hover; if not,\n the style will be Slider.selector.normal.\n\n The slider_rects will be updated according to the values in the value and\n rect variables, and the slider_rect_left_top_right_bottom will be updated according to the\n values in the value and selector_style variables.\n \"\"\" \n bar_style = self.styles[\"slider.bar\"]\n if self.selector_hovered:\n selector_style = self.styles[\"slider.selector.hover\"]\n else:\n selector_style = self.styles[\"slider.selector.normal\"]\n if self.orientation == Horizontal:\n vc = self.rect[1]+self.rect[3]//2\n self.bar_rect = [\n self.rect[0],\n vc-bar_style.height//2,\n self.rect[2], \n bar_style.height\n ]\n self.slider_rect= [\n self.rect[0]+(self.rect[2]-selector_style.width)*self.value,\n self.rect[1]+self.rect[3]//2-selector_style.height//2,\n selector_style.width,\n selector_style.height\n ]\n self.slider_rect_left_top_right_bottom = (\n self.slider_rect[0],\n self.slider_rect[1],\n self.slider_rect[0]+self.slider_rect[2],\n self.slider_rect[1]+self.slider_rect[3]\n )\n else:\n vc = self.rect[0]+self.rect[2]//2\n self.bar_rect = [\n vc-bar_style.width//2,\n 
self.rect[1],\n bar_style.width,\n self.rect[3] \n ] \n self.slider_rect= [\n self.rect[0]+self.rect[2]//2-selector_style.width//2, \n self.rect[1]+(self.rect[3]-selector_style.height)*self.value, \n selector_style.width, \n selector_style.height\n ]\n self.slider_rect_left_top_right_bottom = (\n self.slider_rect[0],\n self.slider_rect[1],\n self.slider_rect[0]+self.slider_rect[2],\n self.slider_rect[1]+self.slider_rect[3]\n )\n\n\n def setRect(self, rect:tuple)->None:\n \"\"\"Sets the rectangle for the widget.\n\n Args:\n rect (tuple): tuple of (x, y, width, height)\n \"\"\"\n self.rect=rect\n self.rect_left_top_right_bottom = (rect[0],rect[1],rect[0]+rect[2],rect[1]+rect[3])\n self.updateUIRects()\n\n\n def paint(self, screen):\n \"\"\"Paints the button\n\n Args:\n screen ([type]): The screen on which to blit\n \"\"\"\n \n bar_style = self.styles[\"slider.bar\"]\n if self.selector_hovered:\n selector_style = self.styles[\"slider.selector.hover\"]\n else:\n selector_style = self.styles[\"slider.selector.normal\"]\n\n if self.orientation == Horizontal:\n # Draw the bar ---------------------------------------------->\n\n if bar_style.img is None:\n self.draw_rect(screen, bar_style,self.bar_rect)\n else:\n screen.blit(pygame.transform.scale(bar_style.img, (self.bar_rect[2], self.bar_rect[3])), (self.bar_rect[0], self.bar_rect[1]))\n \n # Draw the bar ---------------------------------------------->\n if selector_style.img is None:\n if selector_style.bg_color is not None:\n pygame.draw.rect(screen,selector_style.bg_color,self.slider_rect, border_radius = selector_style.border_radius)\n if selector_style.border_size>0:\n pygame.draw.rect(screen,selector_style.border_color,self.slider_rect, selector_style.border_size, border_radius = selector_style.border_radius)\n else:\n screen.blit(pygame.transform.scale(selector_style.img, (self.rect[2], self.rect[3])), (self.rect[0],self.rect[1]))\n else:\n vc = self.rect[0]+self.rect[2]//2\n rect = 
[vc-bar_style.width,self.rect[1], bar_style.width+2,self.rect[3]]\n if bar_style.img is None:\n self.draw_rect(screen, bar_style,[self.rect[0]+5,self.rect[1],self.rect[2]-10,self.rect[3]])\n else:\n screen.blit(pygame.transform.scale(bar_style.img, (self.rect[2], self.rect[3])), (self.rect[0],self.rect[1]))\n \n if selector_style.img is None:\n if selector_style.bg_color is not None:\n pygame.draw.rect(screen,selector_style.bg_color,self.slider_rect, border_radius = selector_style.border_radius)\n if selector_style.border_size>0:\n pygame.draw.rect(screen,selector_style.border_color,self.slider_rect, selector_style.border_size, border_radius = selector_style.border_radius)\n else:\n screen.blit(pygame.transform.scale(selector_style.img, (self.rect[2], self.rect[3])), (self.rect[0],self.rect[1]))\n\n def handle_events(self, events):\n \"\"\"Handles the events\n\n \"\"\"\n for event in events:\n if event.type == pygame.MOUSEMOTION:\n self.selector_hovered = is_point_inside_rect(event.pos,self.slider_rect_left_top_right_bottom)\n self.hovered = is_point_inside_rect(event.pos,self.rect_left_top_right_bottom)\n if self.pressed:\n self.value = min(max(0,(event.pos[0]-self.rect[0])/self.rect[2]),1)\n if self.valueChanged_callback is not None:\n self.valueChanged_callback(self.value) \n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n self.selector_hovered = is_point_inside_rect(event.pos,self.slider_rect_left_top_right_bottom)\n self.hovered = is_point_inside_rect(event.pos,self.rect_left_top_right_bottom)\n if self.hovered == True and self.selector_hovered == False:\n self.value = min(max(0,(event.pos[0]-self.rect[0])/self.rect[2]),1)\n if self.valueChanged_callback is not None:\n self.valueChanged_callback(self.value) \n elif self.selector_hovered:\n self.pressed = True\n\n\n\n elif event.type == pygame.MOUSEBUTTONUP:\n self.selector_hovered = is_point_inside_rect(event.pos,self.slider_rect_left_top_right_bottom)\n self.hovered = 
is_point_inside_rect(event.pos,self.rect_left_top_right_bottom)\n if self.selector_hovered:\n if self.pressed:\n self.value = min(max(0,(event.pos[0]-self.rect[0])/self.rect[2]),1)\n if self.valueChanged_callback is not None:\n self.valueChanged_callback(self.value) \n self.pressed = False\n\n\n# =============================================== List ==========================================\nclass List(Widget):\n def __init__(\n self,\n parent:WindowManager=None,\n list=[],\n style: str = \"\",\n selection_changed_callback=None\n ):\n Widget.__init__(self,parent,style=style, extra_styles={\n \"list\":WidgetStyle(),\n \"list.item.normal\":WidgetStyle(height=20,bg_color=get_color(\"#a7a7a7\")),\n \"list.item.hover\":WidgetStyle(height=20,bg_color=get_color(\"#c6c6c6\")),\n \"list.item.pressed\":WidgetStyle(height=20,bg_color=get_color(\"#565656\"))\n })\n self.list = list\n self.parent = parent\n self.pressed = False\n self.hovered = False\n self.hovered_item_index = 0\n self.current_item = 0\n self.scroll_value = 0\n self.setStyleSheet(style)\n self.first_visible = 0\n self.selection_changed_callback = selection_changed_callback\n self.last_mouse_y_pos = 0\n self.scrolling = False\n\n def paint(self, screen):\n \"\"\"Paints the button\n\n Args:\n screen ([type]): The screen on which to blit\n \"\"\"\n \n outer_style = self.styles[\"list\"]\n\n item_style_hovered = self.styles[\"list.item.hover\"]\n item_style_normal = self.styles[\"list.item.normal\"]\n item_style_selected = self.styles[\"list.item.normal\"]\n\n if outer_style.img is None:\n self.draw_rect(screen, outer_style,[self.rect[0],self.rect[1]+5,self.rect[2],self.rect[3]-10])\n else:\n screen.blit(pygame.transform.scale(outer_style.img, (self.rect[2], self.rect[3])), (self.rect[0],self.rect[1]))\n \n y_pos = self.rect[1]\n x_pos = self.rect[0]\n for i in range(self.first_visible,len(self.list)):\n entry= self.list[i]\n item_rect=[x_pos, y_pos, self.rect[2], item_style_hovered.height]\n if 
i==self.hovered_item_index:\n self.draw_rect(screen, item_style_hovered,item_rect)\n self.blit_text(entry, item_style_hovered, screen, item_rect)\n y_pos += item_style_hovered.height\n elif i==self.current_item:\n self.draw_rect(screen, item_style_selected,item_rect)\n self.blit_text(entry, item_style_selected, screen, item_rect)\n y_pos += item_style_selected.height\n else:\n self.blit_text(entry, item_style_normal, screen, item_rect)\n y_pos += item_style_normal.height\n if y_pos>self.rect_left_top_right_bottom[3]:\n break\n\n\n def handle_events(self, events):\n \"\"\"Handles the events\n\n \"\"\"\n for event in events:\n if event.type == pygame.MOUSEMOTION:\n self.hovered = is_point_inside_rect(event.pos,self.rect_left_top_right_bottom)\n if self.hovered:\n self.hovered_item_index = min((event.pos[1]-self.rect[1])//self.styles[\"list.item.normal\"].height+self.first_visible,len(self.list)-1)\n else:\n self.hovered_item_index = -1\n if self.pressed:\n dy = event.pos[1]-self.last_mouse_y_pos\n if abs(dy)>5:\n self.last_mouse_y_pos = event.pos[1]\n self.scrolling = True\n if dy>5:\n if self.first_visible>0:\n self.first_visible-=1\n else:\n if self.first_visible<len(self.list)-1:\n self.first_visible+=1\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n self.hovered = is_point_inside_rect(event.pos,self.rect_left_top_right_bottom)\n if self.hovered:\n self.pressed= True\n self.last_mouse_y_pos = event.pos[1]\n\n\n\n elif event.type == pygame.MOUSEBUTTONUP:\n self.pressed= False\n if self.hovered:\n if not self.scrolling:\n self.current_item = self.hovered_item_index\n if not self.pressed:\n self.value = min(max(0,(event.pos[0]-self.rect[0])/self.rect[2]),1)\n if self.selection_changed_callback is not None:\n self.selection_changed_callback(self.value) \n self.scrolling = False\n\n# =============================================== Menus ==========================================\n# ---------------------------------------------------- Menu Bar 
-----------------------------------------------------\n\nclass MenuBar(Widget):\n def __init__(\n self,\n parent:WindowManager,\n style: str = \"menu_bar{background-color:#878787;}\\n\"\n ):\n Widget.__init__(self,parent,style=style, extra_styles={\"menu_bar\":WidgetStyle()})\n self.parent = parent\n self.menus=[]\n self.setStyleSheet(style)\n\n def addMenu(self, menu):\n self.menus.append(menu)\n\n @property\n def width(self):\n w, h = pygame.display.get_surface().get_size()\n return w\n\n @property\n def height(self):\n style = self.styles[\"menu_bar\"]\n w, h = pygame.display.get_surface().get_size()\n if style.height is not None:\n h = style.height\n else:\n h = 20\n return h\n\n def paint(self, screen):\n \"\"\"Paints the manue\n\n Args:\n screen ([type]): The screen on which to blit\n \"\"\"\n style = self.styles[\"menu_bar\"]\n w, h = pygame.display.get_surface().get_size()\n if style.height is not None:\n h = style.height\n else:\n h = 20\n self.setRect([0,0,w,h])\n\n self.draw_rect(screen, style)\n\n rect_x_start = 0\n rect_y_start=0\n for menu in self.menus:\n if menu.visible:\n rect_x_start, rect_y_start = menu.prepare(rect_x_start, rect_y_start)\n menu.paint(screen)\n\n def handle_events(self, events):\n for menu in self.menus:\n if menu.visible:\n menu.handle_events(events)\n return super().handle_events(events) \n\n# ---------------------------------------------------- Menu -----------------------------------------------------\n\n\nclass Menu(Button):\n def __init__(\n self,\n parent:MenuBar,\n caption=\"\",\n style:str=\"\",\n ):\n Button.__init__(self, caption,style=style,extra_styles={\n \"btn.normal\":WidgetStyle(border_radius=0,text_color=(255,255,255), bg_color=get_color(\"#878787\")),\n \"btn.hover\":WidgetStyle(border_radius=0,text_color=(255,255,255), bg_color=get_color(\"#a9a9a9\")),\n \"btn.pressed\":WidgetStyle(border_radius=0,text_color=(255,255,255), bg_color=get_color(\"#565656\")),\n })\n self.parent = parent\n self.actions=[]\n 
parent.addMenu(self)\n self.clicked_event_handler = self.fn_clicked_event_handler\n self.lost_focus_event_handler = self.fn_lost_focus_event_handler\n\n def fn_clicked_event_handler(self):\n for action in self.actions:\n action.visible=not action.visible\n\n def fn_lost_focus_event_handler(self):\n for action in self.actions:\n action.visible=False\n\n def addAction(self, action):\n action.visible=False\n self.actions.append(action)\n\n def prepare(self, rect_xstart=0, rect_ystart=0):\n style = self.styles[\"widget\"]\n if style.height is not None:\n h = style.height\n else:\n h = 20\n if style.width is not None:\n w = style.width\n else:\n w = 100\n\n self.setRect([rect_xstart,rect_ystart,w, h])\n\n return rect_xstart + w, rect_ystart\n\n def paint(self, screen):\n style = self.styles[\"widget\"]\n Button.paint(self, screen)\n y=self.rect[1]+self.rect[3]\n for action in self.actions:\n _, y = action.prepare(self.rect[0], y)\n if action.visible:\n action.paint(screen)\n\n\n\n def handle_events(self, events):\n for action in self.actions:\n if action.visible:\n action.handle_events(events)\n return super().handle_events(events) \n\n\n# ---------------------------------------------------- Action -----------------------------------------------------\n\n\nclass Action(Button):\n def __init__(\n self,\n parent:Menu,\n caption=\"\",\n style:str=\"\",\n ):\n Button.__init__(self, caption,style=style,extra_styles={\n \"btn.normal\":WidgetStyle(border_radius=0,text_color=(255,255,255), bg_color=get_color(\"#878787\")),\n \"btn.hover\":WidgetStyle(border_radius=0,text_color=(255,255,255), bg_color=get_color(\"#a9a9a9\")),\n \"btn.pressed\":WidgetStyle(border_radius=0,text_color=(255,255,255), bg_color=get_color(\"#565656\")), \n })\n self.parent = parent\n self.actions=[]\n parent.addAction(self)\n\n def prepare(self, rect_xstart=0, rect_ystart=0):\n style = self.styles[\"widget\"]\n if style.height is not None:\n h = style.height\n else:\n h = 20\n if style.width is not 
None:\n w = style.width\n else:\n w = 100\n\n self.setRect([rect_xstart,rect_ystart,w, h]) \n return rect_xstart, rect_ystart + h\n\n def paint(self, screen):\n style = self.styles[\"widget\"]\n\n Button.paint(self, screen)\n\nclass MenuSeparator(Label):\n def __init__(\n self,\n parent:Menu,\n caption=\"\",\n style:str=\"\",\n ):\n Label.__init__(self, caption,style=style)\n self.parent = parent\n self.actions=[]\n parent.addAction(self)\n\n\n def prepare(self, rect_xstart=0, rect_ystart=0):\n style = self.styles[\"widget\"]\n if style.height is not None:\n h = style.height\n else:\n h = 1\n if style.width is not None:\n w = style.width\n else:\n w = 100\n\n self.setRect([rect_xstart,rect_ystart,w, h]) \n return rect_xstart, rect_ystart + h\n\n\n def paint(self, screen):\n style = self.styles[\"widget\"]\n self.draw_rect(screen, style)\n\n\n" ]
[ [ "numpy.swapaxes" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kstisser/DeepLearningPointCloud
[ "646e8e20ec62e502c0dc95d73f755809df05706b" ]
[ "ModelBackbone/pointPillarModel.py" ]
[ "import numpy as np\nfrom DataTools import defs\nimport tensorflow as tf\nimport os\nfrom . import losses\n\nclass PointPillarModel:\n def __init__(self, modelFileLocation, logDir=\"../Logs/\"):\n self.modelFileLocation = modelFileLocation\n self.logDir = logDir\n\n #reference to https://github.com/tyagi-iiitv/PointPillars.git\n def createModelBackbone(self, pillarsModel, trainPillars, trainLabels, testPillars, testLabels, input_pillars, input_indices):\n # 2d cnn backbone\n\n # Block1(S, 4, C)\n x = pillarsModel\n for n in range(4):\n S = (2, 2) if n == 0 else (1, 1)\n x = tf.keras.layers.Conv2D(defs.nb_channels, (3, 3), strides=S, padding=\"same\", activation=\"relu\",\n name=\"cnn/block1/conv2d%i\" % n)(x)\n x = tf.keras.layers.BatchNormalization(name=\"cnn/block1/bn%i\" % n, fused=True)(x)\n x1 = x\n\n # Block2(2S, 6, 2C)\n for n in range(6):\n S = (2, 2) if n == 0 else (1, 1)\n x = tf.keras.layers.Conv2D(2 * defs.nb_channels, (3, 3), strides=S, padding=\"same\", activation=\"relu\",\n name=\"cnn/block2/conv2d%i\" % n)(x)\n x = tf.keras.layers.BatchNormalization(name=\"cnn/block2/bn%i\" % n, fused=True)(x)\n x2 = x\n\n # Block3(4S, 6, 4C)\n for n in range(6):\n S = (2, 2) if n == 0 else (1, 1)\n x = tf.keras.layers.Conv2D(2 * defs.nb_channels, (3, 3), strides=S, padding=\"same\", activation=\"relu\",\n name=\"cnn/block3/conv2d%i\" % n)(x)\n x = tf.keras.layers.BatchNormalization(name=\"cnn/block3/bn%i\" % n, fused=True)(x)\n x3 = x\n\n # Up1 (S, S, 2C)\n up1 = tf.keras.layers.Conv2DTranspose(2 * defs.nb_channels, (3, 3), strides=(1, 1), padding=\"same\", activation=\"relu\",\n name=\"cnn/up1/conv2dt\")(x1)\n up1 = tf.keras.layers.BatchNormalization(name=\"cnn/up1/bn\", fused=True)(up1)\n\n # Up2 (2S, S, 2C)\n up2 = tf.keras.layers.Conv2DTranspose(2 * defs.nb_channels, (3, 3), strides=(2, 2), padding=\"same\", activation=\"relu\",\n name=\"cnn/up2/conv2dt\")(x2)\n up2 = tf.keras.layers.BatchNormalization(name=\"cnn/up2/bn\", fused=True)(up2)\n\n # Up3 (4S, S, 
2C)\n up3 = tf.keras.layers.Conv2DTranspose(2 * defs.nb_channels, (3, 3), strides=(4, 4), padding=\"same\", activation=\"relu\",\n name=\"cnn/up3/conv2dt\")(x3)\n up3 = tf.keras.layers.BatchNormalization(name=\"cnn/up3/bn\", fused=True)(up3)\n\n # Concat\n concat = tf.keras.layers.Concatenate(name=\"cnn/concatenate\")([up1, up2, up3])\n#conv layer over this- same size\n#single 1x1 or just this\n#dice + bin crossentropy\n pillar_net = concat\n if defs.detectionMethod == defs.DetectionMethod.DETECTIONHEAD:\n # Detection head\n occ = tf.keras.layers.Conv2D(defs.nb_anchors, (1, 1), name=\"occupancy/conv2d\", activation=\"sigmoid\")(concat)\n\n loc = tf.keras.layers.Conv2D(defs.nb_anchors * 3, (1, 1), name=\"loc/conv2d\", kernel_initializer=tf.keras.initializers.TruncatedNormal(0, 0.001))(concat)\n loc = tf.keras.layers.Reshape(tuple(i//2 for i in image_size) + (nb_anchors, 3), name=\"loc/reshape\")(loc)\n\n size = tf.keras.layers.Conv2D(defs.nb_anchors * 3, (1, 1), name=\"size/conv2d\", kernel_initializer=tf.keras.initializers.TruncatedNormal(0, 0.001))(concat)\n size = tf.keras.layers.Reshape(tuple(i//2 for i in image_size) + (nb_anchors, 3), name=\"size/reshape\")(size)\n\n angle = tf.keras.layers.Conv2D(defs.nb_anchors, (1, 1), name=\"angle/conv2d\")(concat)\n\n heading = tf.keras.layers.Conv2D(defs.nb_anchors, (1, 1), name=\"heading/conv2d\", activation=\"sigmoid\")(concat)\n\n clf = tf.keras.layers.Conv2D(defs.nb_anchors * defs.nb_classes, (1, 1), name=\"clf/conv2d\")(concat)\n clf = tf.keras.layers.Reshape(tuple(i // 2 for i in image_size) + (defs.nb_anchors, defs.nb_classes), name=\"clf/reshape\")(clf)\n\n pillar_net = tf.keras.models.Model([input_pillars, input_indices], [occ, loc, size, angle, heading, clf])\n elif defs.detectionMethod == defs.DetectionMethod.BINARY:\n #What do do here? 
\n print(\"Setting Binary Dense Layer!\")\n pillar_net = tf.keras.layers.Dense(defs.max_points, activation = 'sigmoid')(concat)\n pillar_net = tf.keras.models.Model([input_pillars, input_indices], [pillar_net])\n else:\n print(\"Error! Don't recognize the type of detection head!\")\n\n #try loading weights\n #pillar_net.load_weights(os.path.join(\"./\", \"model.h5\"))\n\n #loss\n loss = losses.PointPillarNetworkLoss()\n\n #optimizer\n optimizer = tf.keras.optimizers.Adam(lr=defs.learning_rate, decay=defs.decay_rate)\n #compile\n #pillar_net.compile(optimizer, loss=loss.losses())\n pillar_net.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy())\n\n #epoch_to_decay = int(\n #np.round(defs.iters_to_decay / defs.batch_size * int(np.ceil(float(len(label_files)) / params.batch_size))))\n '''callbacks = [\n tf.keras.callbacks.TensorBoard(log_dir=\"../Logs/\"),\n tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(\"./\", \"myBackboneModel.h5\"),\n monitor='val_loss', save_best_only=True),\n tf.keras.callbacks.EarlyStopping(patience=20, monitor='val_loss'),\n ]'''\n\n print(pillar_net.summary()) \n # \n # Train and save\n try:\n #trainLabels = np.array(trainLabels)\n print(\"Training data size: \", trainPillars.shape)\n print(\"Training labels shape: \", len(trainLabels))\n #print(trainLabels)\n '''pillar_net.fit(trainPillars,\n validation_data = trainLabels,\n steps_per_epoch=len(trainPillars),\n callbacks=callbacks,\n use_multiprocessing=True,\n epochs=int(defs.total_training_epochs),\n workers=6)'''\n pillar_net.fit(trainPillars,\n validation_data = trainLabels,\n epochs=int(defs.total_training_epochs))\n except KeyboardInterrupt:\n model_str = \"interrupted_%s.h5\" % time.strftime(\"%Y%m%d-%H%M%S\")\n pillar_net.save(os.path.join(\"../Logs/\", model_str))\n print(\"Interrupt. Saving output to %s\" % os.path.join(os.getcwd(), self.logDir[1:], model_str)) \n " ]
[ [ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2DTranspose", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.initializers.TruncatedNormal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
liangstein/ChabBot-PyTorch
[ "0095afe636dfe3fa09ffa666b00cae377b0e66bf" ]
[ "train_chatbot.py" ]
[ "import torch\nfrom torch import nn,optim\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom models_are_here import Attention_layer,EncoderRNN,DecoderRNN\nimport pickle\nimport numpy as np\n'''with open(\"less_length_questions\",\"rb\") as f:\n questions_tok=pickle.load(f)\n\nwith open(\"less_length_answers\",\"rb\") as f:\n answers_tok=pickle.load(f)\n\nQ_A_dict={}\nquestions_tok_no_repeat,answers_tok_no_repeat=[],[]\nfor i in range(len(questions_tok)):\n Q=tuple(questions_tok[i])\n A=tuple(answers_tok[i])\n if Q in Q_A_dict:\n pass\n else:\n Q_A_dict[Q] = A\n\nfor x in Q_A_dict:\n questions_tok_no_repeat.append(x)\n answers_tok_no_repeat.append(Q_A_dict[x])\n\nwith open(\"Q_no_repeat\",\"wb\") as f:\n pickle.dump(questions_tok_no_repeat,f,protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open(\"A_no_repeat\",\"wb\") as f:\n pickle.dump(answers_tok_no_repeat,f,protocol=pickle.HIGHEST_PROTOCOL)'''\n\nwith open(\"Q_no_repeat\",\"rb\") as f:\n questions_tok=pickle.load(f)\n\nwith open(\"A_no_repeat\",\"rb\") as f:\n answers_tok=pickle.load(f)\n\nmaxlen_q,maxlen_a=19,19\nmaxlength_list=[5,10,15,20]\n'''with open(\"length_classified_questions\",\"rb\") as f:\n length_classified_questions=pickle.load(f)\n\nwith open(\"length_classified_answers\",\"rb\") as f:\n length_classified_answers=pickle.load(f)'''\n\n\nwith open(\"word_index_dict\",\"rb\") as f:\n word_index_dict=pickle.load(f)\n\nwith open(\"index_word_dict\",\"rb\") as f:\n index_word_dict=pickle.load(f)\n\n\nsetting_batch_size=400\nencoder=EncoderRNN(len(word_index_dict)+1,1024,1024).cuda() # input has no EOS indice\ndecoder=DecoderRNN(1024,1024,len(index_word_dict)+2).cuda() # final output contains EOS indice\nattention=Attention_layer(maxlen_q+1).cuda()\nparams_encoder,params_decoder,params_attention=\\\n list(encoder.parameters()),list(decoder.parameters()),list(attention.parameters())\n#attention_layer_list=[Attention_layer(ele).cuda() for ele in 
maxlength_list]\n#attention_layers_params=[ele.parameters() for ele in attention_layer_list]\noptimizer=optim.Adam(params_encoder+params_decoder+params_attention)\nsheduler=optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode=\"min\",factor=0.5,patience=10)\nloss=nn.CrossEntropyLoss(ignore_index=0)\nsteps_per_epoch=int(len(questions_tok)/setting_batch_size)\nfor epoch in range(2000):\n loss_lists = []\n all_labels=np.arange(0,len(questions_tok));np.random.shuffle(all_labels)\n batch_labels=np.array_split(all_labels,int(len(questions_tok)/setting_batch_size))\n for labels in batch_labels:\n batch_size=len(labels)\n q_vec=np.zeros((batch_size,maxlen_q+1))\n a_vec=np.zeros((batch_size,maxlen_a+1))\n for label_of_label,label in enumerate(labels):\n for j1,ele1 in enumerate(questions_tok[label]):\n q_vec[label_of_label,j1]=word_index_dict[ele1]\n for j2,ele2 in enumerate(answers_tok[label]):\n a_vec[label_of_label,j2]=word_index_dict[ele2]+1\n a_vec[label_of_label,j2+1]=1\n input_tensor=Variable(torch.from_numpy(q_vec).type(torch.LongTensor)).cuda()\n outputs,_=encoder(input_tensor,attention)\n encoded_tensor=outputs\n answer=decoder(encoded_tensor[:])\n # output has additional dimension due to EOS indice\n l=loss(answer.contiguous().view(-1,len(word_index_dict)+2),\n torch.from_numpy(a_vec).type(torch.LongTensor).view(-1).cuda())\n for i in params_attention:\n i.grad=None\n for i in params_encoder:\n i.grad=None\n for i in params_decoder:\n i.grad=None\n l.backward()\n optimizer.step()\n print(l)\n loss_lists.append(l.cpu().data.numpy())\n with open(\"losses\",\"a\") as f:\n epoch_loss=np.mean(loss_lists)\n f.write(\"Epoch: {}, Loss: {}\\n\".format(str(epoch),str(epoch_loss)))\n sheduler.step(epoch_loss)\n #check model weights\n with open(\"weights/encoder\",\"wb\") as f:\n pickle.dump([ele.cpu() for ele in params_encoder],f,protocol=pickle.HIGHEST_PROTOCOL)\n with open(\"weights/decoder\",\"wb\") as f:\n pickle.dump([ele.cpu() for ele in 
params_decoder],f,protocol=pickle.HIGHEST_PROTOCOL)\n with open(\"weights/attention\", \"wb\") as f:\n pickle.dump([ele.cpu() for ele in params_attention], f, protocol=pickle.HIGHEST_PROTOCOL)\n" ]
[ [ "torch.optim.Adam", "torch.nn.CrossEntropyLoss", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.from_numpy", "numpy.random.shuffle", "numpy.mean", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
drwaterman/yellowbrick
[ "aa37696219747137b5ae9e5482c8397c086b89d8" ]
[ "docs/api/regressor/alphas.py" ]
[ "import numpy as np\nimport pandas as pd\n\nfrom sklearn.linear_model import LassoCV\n\nfrom yellowbrick.regressor import AlphaSelection\n\n\nif __name__ == '__main__':\n # Load the regression data set\n df = pd.read_csv(\"../../../examples/data/concrete/concrete.csv\")\n\n feature_names = ['cement', 'slag', 'ash', 'water', 'splast', 'coarse', 'fine', 'age']\n target_name = 'strength'\n\n # Get the X and y data from the DataFrame\n X = df[feature_names].as_matrix()\n y = df[target_name].as_matrix()\n\n # Instantiate the linear model and visualizer\n alphas = np.logspace(-10, 1, 400)\n visualizer = AlphaSelection(LassoCV(alphas=alphas))\n\n visualizer.fit(X, y)\n g = visualizer.poof(outpath=\"images/alpha_selection.png\")\n" ]
[ [ "numpy.logspace", "pandas.read_csv", "sklearn.linear_model.LassoCV" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
pdxgx/pepsickle
[ "ca4f336c8437b8ee21dfe29b8ae84cbaf8725986" ]
[ "pepsickle/model_functions.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\nmodel_functions.py\n\nFor issues contact Ben Weeder ([email protected])\n\nThis script contains functions for wrapping generated proteasomal cleavage\nprediction models and handling fasta protein inputs for easy model\nimplementation.\n\"\"\"\n\nimport os\nimport warnings\nimport pepsickle.sequence_featurization_tools as sft\nfrom Bio import SeqIO\nfrom itertools import count\nimport numpy as np\nimport pickle\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport joblib\n\n\n# sets path to stored model weights\n_model_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nclass DigestionSeqNet(nn.Module):\n def __init__(self):\n super().__init__()\n # self.in_nodes = 262 # for normal 13aa window\n self.in_nodes = 7 * 20 + 2\n self.drop = nn.Dropout(p=0.25)\n self.input = nn.Linear(self.in_nodes, 136)\n self.bn1 = nn.BatchNorm1d(136)\n self.fc1 = nn.Linear(136, 68)\n self.bn2 = nn.BatchNorm1d(68)\n self.fc2 = nn.Linear(68, 34)\n self.bn3 = nn.BatchNorm1d(34)\n self.out = nn.Linear(34, 2)\n\n def forward(self, x, c_prot, i_prot):\n # make sure input tensor is flattened\n\n x = x.reshape(x.shape[0], -1)\n x = torch.cat((x, c_prot.reshape(c_prot.shape[0], -1)), 1)\n x = torch.cat((x, i_prot.reshape(i_prot.shape[0], -1)), 1)\n\n x = self.drop(F.relu(self.bn1(self.input(x))))\n x = self.drop(F.relu(self.bn2(self.fc1(x))))\n x = self.drop(F.relu(self.bn3(self.fc2(x))))\n x = F.log_softmax(self.out(x), dim=1)\n\n return x\n\n\nclass DigestionMotifNet(nn.Module):\n def __init__(self):\n super().__init__()\n # self.in_nodes = 46\n self.in_nodes = (7 - 2) * 4 + 2\n self.drop = nn.Dropout(p=.25)\n self.conv = nn.Conv1d(4, 4, 3, groups=4)\n # self.fc1 = nn.Linear(78, 38)\n self.fc1 = nn.Linear(self.in_nodes, 38)\n self.bn1 = nn.BatchNorm1d(38)\n self.fc2 = nn.Linear(38, 20)\n self.bn2 = nn.BatchNorm1d(20)\n self.out = nn.Linear(20, 2)\n\n def forward(self, x, c_prot, i_prot):\n # perform convolution 
prior to flattening\n x = x.transpose(1, 2)\n x = self.conv(x)\n\n # make sure input tensor is flattened\n x = x.reshape(x.shape[0], -1)\n x = torch.cat((x, c_prot.reshape(c_prot.shape[0], -1)), 1)\n x = torch.cat((x, i_prot.reshape(i_prot.shape[0], -1)), 1)\n\n x = self.drop(F.relu(self.bn1(self.fc1(x))))\n x = self.drop(F.relu(self.bn2(self.fc2(x))))\n x = F.log_softmax(self.out(x), dim=1)\n\n return x\n\n\nclass EpitopeSeqNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.in_nodes = 17 * 20\n self.drop = nn.Dropout(p=0.2)\n self.input = nn.Linear(self.in_nodes, 136)\n self.bn1 = nn.BatchNorm1d(136)\n self.fc1 = nn.Linear(136, 68)\n self.bn2 = nn.BatchNorm1d(68)\n self.fc2 = nn.Linear(68, 34)\n self.bn3 = nn.BatchNorm1d(34)\n self.out = nn.Linear(34, 2)\n\n def forward(self, x):\n # make sure input tensor is flattened\n x = x.reshape(x.shape[0], -1)\n\n x = self.drop(F.relu(self.bn1(self.input(x))))\n x = self.drop(F.relu(self.bn2(self.fc1(x))))\n x = self.drop(F.relu(self.bn3(self.fc2(x))))\n x = F.log_softmax(self.out(x), dim=1)\n\n return x\n\n\nclass EpitopeMotifNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.in_nodes = (17 - 2) * 4\n self.drop = nn.Dropout(p=0.2)\n self.conv = nn.Conv1d(4, 4, 3, groups=4)\n self.fc1 = nn.Linear(self.in_nodes, 38)\n self.bn1 = nn.BatchNorm1d(38)\n self.fc2 = nn.Linear(38, 20)\n self.bn2 = nn.BatchNorm1d(20)\n self.out = nn.Linear(20, 2)\n\n def forward(self, x):\n # perform convolution prior to flattening\n x = x.transpose(1, 2)\n x = self.conv(x)\n\n # make sure input tensor is flattened\n x = x.reshape(x.shape[0], -1)\n\n x = self.drop(F.relu(self.bn1(self.fc1(x))))\n x = self.drop(F.relu(self.bn2(self.fc2(x))))\n x = F.log_softmax(self.out(x), dim=1)\n\n return x\n\n\ndef initialize_epitope_model(human_only=False):\n \"\"\"\n initializes an epitope based cleavage prediction model\n :param human_only: if true, model weights trained using human data only and\n non-human mammal data will be 
excluded\n :return: functional pytorch model for predicting proteasomal cleavage\n \"\"\"\n _model_path = os.path.join(_model_dir,\n \"pepsickle\",\n \"trained_model_dict.pickle\")\n _model_dict = pickle.load(open(_model_path, 'rb'))\n # set proper model file\n if human_only:\n seq_mod_state = _model_dict['human_epitope_sequence_mod']\n motif_mod_state = _model_dict['human_epitope_motif_mod']\n else:\n seq_mod_state = _model_dict['all_mammal_epitope_sequence_mod']\n motif_mod_state = _model_dict['all_mammal_epitope_motif_mod']\n\n # initialize models\n seq_mod = EpitopeSeqNet()\n motif_mod = EpitopeMotifNet()\n seq_mod.load_state_dict(seq_mod_state)\n motif_mod.load_state_dict(motif_mod_state)\n seq_mod.eval()\n motif_mod.eval()\n return [seq_mod, motif_mod]\n\n\ndef initialize_digestion_model(human_only=False):\n \"\"\"\n initializes an in-vitro digestion based cleavage prediction model\n :param human_only: if true, model weights trained using human data only and\n non-human mammal data will be excluded\n :return: functional pytorch model for predicting proteasomal cleavage\n \"\"\"\n _model_path = os.path.join(_model_dir,\n \"pepsickle\",\n \"trained_model_dict.pickle\")\n _model_dict = pickle.load(open(_model_path, 'rb'))\n\n # set proper model file\n if human_only:\n seq_mod_state = _model_dict['human_20S_digestion_sequence_mod']\n motif_mod_state = _model_dict['human_20S_digestion_motif_mod']\n else:\n seq_mod_state = _model_dict['all_mammal_20S_digestion_sequence_mod']\n motif_mod_state = _model_dict['all_mammal_20S_digestion_motif_mod']\n\n # initialize models\n seq_mod = DigestionSeqNet()\n motif_mod = DigestionMotifNet()\n seq_mod.load_state_dict(seq_mod_state)\n motif_mod.load_state_dict(motif_mod_state)\n seq_mod.eval()\n motif_mod.eval()\n return [seq_mod, motif_mod]\n\n\ndef initialize_digestion_gb_model(human_only=False):\n # TODO: add in human only/non-human only options\n _model_path = os.path.join(_model_dir,\n \"pepsickle\",\n \"model.joblib\")\n 
model = joblib.load(_model_path)\n return model\n\n\ndef predict_epitope_mod(model, features):\n \"\"\"\n Model wrapper that takes an epitope based model and feature array and\n returns a vector of cleavage prediction probabilities\n :param model: epitope based cleavage prediction models (list)\n :param features: array of features from generate_feature_array()\n :return: vector of cleavage probabilities\n \"\"\"\n features = torch.from_numpy(features)\n with torch.no_grad():\n p_cleavage1 = torch.exp(\n model[0](features.type(torch.FloatTensor)[:, :, :20])[:, 1]\n )\n p_cleavage2 = torch.exp(\n model\n [1](features.type(torch.FloatTensor)[:, :, 22:])[:, 1]\n )\n p_cleavage_avg = (p_cleavage1 + p_cleavage2) / 2\n\n output_p = [float(x) for x in p_cleavage_avg]\n return output_p\n\n\ndef predict_digestion_mod(model, features, proteasome_type=\"C\"):\n \"\"\"\n Model wrapper that takes an in-vitro digestion based model and feature\n array and returns a vector of cleavage prediction probabilities\n :param model: digestion based cleavage prediction model (list)\n :param features: array of features from generate_feature_array()\n :param proteasome_type: takes \"C\" to base predictions on the constitutive\n pepsickle or \"I\" to base predictions on the immunoproteasome\n :return: vector of cleavage probabilities\n \"\"\"\n # assert features.shape[2] == 24\n features = torch.from_numpy(features)\n\n if proteasome_type == \"C\":\n c_prot = torch.tensor([1] * features.shape[0]).type(torch.FloatTensor)\n i_prot = torch.tensor([0] * features.shape[0]).type(torch.FloatTensor)\n elif proteasome_type == \"I\":\n c_prot = torch.tensor([0] * features.shape[0]).type(torch.FloatTensor)\n i_prot = torch.tensor([1] * features.shape[0]).type(torch.FloatTensor)\n else:\n return ValueError(\"Proteasome type was not recognized\")\n\n with torch.no_grad():\n p1 = torch.exp(\n model[0](features.type(torch.FloatTensor)[:, :, :20], c_prot,\n i_prot)[:, 1]\n )\n p2 = torch.exp(\n 
model[1](features.type(torch.FloatTensor)[:, :, 22:], c_prot,\n i_prot)[:, 1]\n )\n p_cleavage = (p1 + p2) / 2\n\n output_p = [float(x) for x in p_cleavage]\n return output_p\n\n\ndef predict_digestion_gb_mod(model, features, proteasome_type=\"C\",\n shift_p=False):\n # set c/i identity for each entry\n if proteasome_type == \"C\":\n c_prot = np.array([1] * features.shape[0])\n i_prot = np.array([0] * features.shape[0])\n elif proteasome_type == \"I\":\n c_prot = np.array([0] * features.shape[0])\n i_prot = np.array([1] * features.shape[0])\n x = features[:, :, 22:].reshape(features.shape[0], -1)\n x = np.concatenate((x, c_prot.reshape(c_prot.shape[0], -1)), 1)\n x = np.concatenate((x, i_prot.reshape(i_prot.shape[0], -1)), 1)\n\n # shift based on class imbalance:\n if shift_p:\n shift = (0.5 - 0.361) # training set overall imbalance\n \"\"\"\n if proteasome_type == \"C\":\n shift = (0.5 - 0.371) # proteasome specific class imbalance\n else:\n shift = (0.5 - .343) # proteasome specific class imbalance\n \"\"\"\n else:\n shift = 0\n p = model.predict_proba(x)[:, 1]\n probs = [min((float(x) + shift), 1) for x in p]\n return probs\n\n\ndef create_windows_from_protein(protein_seq, **kwargs):\n \"\"\"\n wrapper for get_peptide_window(). 
takes in a protein sequence and returns\n a vector of k-merized windows.\n :param protein_seq: protein sequence\n :return: vector of protein windows\n \"\"\"\n # NOTE: last AA not made into window since c-terminal would be cleavage pos\n protein_windows = []\n for pos in range(len(protein_seq)):\n start_pos = pos + 1\n end_pos = pos + 1\n tmp_window = sft.get_peptide_window(protein_seq,\n starting_position=start_pos,\n ending_position=end_pos,\n **kwargs)\n protein_windows.append(tmp_window)\n\n return protein_windows\n\n\ndef predict_protein_cleavage_locations(protein_seq, model, protein_id=None,\n mod_type=\"epitope\",\n proteasome_type=\"C\",\n threshold=.5):\n \"\"\"\n general wrapper that accepts full protein information and returns a pandas\n data frame with cleavage site probabilities and predictions\n :param protein_id: protein identifier\n :param protein_seq: full protein sequence\n :param model: functional pytorch model to be used for predictions\n :param mod_type: whether model is using \"epitope\" or \"digestion\"\n :param proteasome_type: if digestion, the type of pepsickle to use for\n predictions (C or I)\n :param threshold: threshold used to call cleavage vs. 
non-cleavage\n :return: summary table for each position in the peptide\n \"\"\"\n sft.check_sequence_validity(protein_seq)\n if mod_type == \"epitope\":\n upstream = 8\n downstream = 8\n protein_windows = create_windows_from_protein(protein_seq,\n upstream=upstream,\n downstream=downstream)\n window_features = sft.generate_feature_array(protein_windows)\n preds = predict_epitope_mod(model, window_features)\n\n elif mod_type == \"in-vitro-2\":\n upstream = 3\n downstream = 3\n protein_windows = create_windows_from_protein(protein_seq,\n upstream=upstream,\n downstream=downstream)\n window_features = sft.generate_feature_array(protein_windows)\n preds = predict_digestion_mod(model, window_features,\n proteasome_type=proteasome_type)\n elif mod_type == \"in-vitro\":\n upstream = 3\n downstream = 3\n protein_windows = create_windows_from_protein(protein_seq,\n upstream=upstream,\n downstream=downstream)\n window_features = sft.generate_feature_array(protein_windows,\n normalize=True)\n preds = predict_digestion_gb_mod(model, window_features,\n proteasome_type=proteasome_type)\n\n # By definition, last position can never be a cleavage site\n preds[-1] = 0\n out_preds = [round(p, 4) for p in preds]\n positions = range(1, len(preds)+1)\n residues = list(protein_seq)\n cleave = [p > threshold for p in preds]\n prot_list = [protein_id] * len(positions)\n out_zip = zip(positions, residues, out_preds, cleave, prot_list)\n out = [i for i in out_zip]\n return out\n\n\ndef format_protein_cleavage_locations(protein_preds):\n out_lines = []\n for item in protein_preds:\n line = \"{}\\t{}\\t{}\\t{}\\t{}\".format(item[0], item[1], item[2], item[3], item[4])\n out_lines.append(line)\n return out_lines\n\n\ndef process_fasta(fasta_file, cleavage_model, verbose=False, **kwargs):\n \"\"\"\n handles fasta file path and returns pandas df with cleavage prediction\n results\n :param fasta_file: path to the fasta file that needs processed\n :param cleavage_model: active model or model 
initialization to be used\n :param verbose: flag to print out progress when list of proteins is given\n :param out_file_location: output location where results are written.\n :param kwargs: parameters to be passed to the cleavage prediction model\n :return: pandas dataframe with cleavage predictions\n \"\"\"\n try:\n protein_list = SeqIO.to_dict(SeqIO.parse(fasta_file, \"fasta\"))\n except ValueError:\n warnings.warn(\"Multiple proteins are using the same identifier, \"\n \"it is recommended to use unique identifiers for \"\n \"each protein input. ID numbers will be appended to \"\n \"provide unique protein ID's\")\n pass\n c = count(start=1)\n protein_list = SeqIO.to_dict(SeqIO.parse(fasta_file, \"fasta\"),\n key_function=lambda x: f\"{x.id}_{next(c)}\")\n\n end = len(protein_list)\n master_lines = [\"position\\tresidue\\tcleav_prob\\tcleaved\\tprotein_id\"]\n for i, protein_id in enumerate(protein_list):\n if i % 100 == 0 and verbose:\n print(\"completed:\", i, \"of\", end)\n tmp_out = predict_protein_cleavage_locations(\n protein_id=protein_id, protein_seq=protein_list[protein_id].upper(),\n model=cleavage_model, **kwargs)\n\n for line in format_protein_cleavage_locations(tmp_out):\n master_lines.append(line)\n\n return master_lines\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.from_numpy", "torch.tensor", "torch.nn.Linear", "torch.no_grad", "torch.nn.Conv1d", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ayshwaryas/pegasus
[ "b44f0f7c9cdb0da89de385ab6074a2b22bd29d0e" ]
[ "pegasus/annotate_cluster/annotate_cluster.py" ]
[ "import numpy as np\nimport pandas as pd\nimport json\n\nfrom sys import stdout\nfrom natsort import natsorted\nfrom typing import List, Dict, Union\nfrom anndata import AnnData\nfrom io import IOBase\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom pegasusio import timer, MultimodalData, UnimodalData\n\n\nclass CellType:\n def __init__(self, name: str, ignore_nonde: bool = False):\n self.name = name\n self.score = self.avgp = 0.0\n self.weak_support = []\n self.strong_support = []\n self.subtypes = None\n self.ignore_nonde = ignore_nonde\n\n def evaluate(\n self,\n obj: dict,\n de_up: pd.DataFrame,\n de_down: pd.DataFrame,\n thre: float,\n ):\n \"\"\" Calculate score for matching a cluster with a putative cell type.\n \"\"\"\n self.score = self.avgp = 0.0\n self.weak_support = []\n self.strong_support = []\n\n nump = 0\n for marker_set in obj[\"markers\"]:\n numer = 0.0\n denom = len(marker_set[\"genes\"]) * 2.0\n if denom == 0.0:\n continue\n\n for marker in marker_set[\"genes\"]:\n sign = marker[-1]\n gsym = marker[:-1]\n\n if sign == \"+\":\n if gsym in de_up.index:\n fc = de_up.at[gsym, \"fc\"]\n percent = de_up.at[gsym, \"percent\"]\n self.avgp += percent\n nump += 1\n\n if fc >= thre:\n numer += 2.0\n self.strong_support.append(\n (marker, f\"{percent:.2f}%\")\n )\n else:\n numer += 1.0 + (fc - 1.0) / (thre - 1.0)\n self.weak_support.append(\n (marker, f\"{percent:.2f}%\")\n )\n else:\n assert sign == \"-\"\n if gsym not in de_up.index:\n if gsym in de_down.index:\n fc = (\n (1.0 / de_down.at[gsym, \"fc\"])\n if de_down.at[gsym, \"fc\"] > 0.0\n else np.inf\n )\n percent = de_down.at[gsym, \"percent\"]\n if fc >= thre:\n numer += 2.0\n self.strong_support.append(\n (marker, f\"{percent:.2f}%\")\n )\n else:\n numer += 1.0 + (fc - 1.0) / (thre - 1.0)\n self.weak_support.append(\n (marker, f\"{percent:.2f}%\")\n )\n elif not self.ignore_nonde:\n numer += 1.0\n self.weak_support.append((marker, \"N/A\"))\n\n self.score += numer / denom * 
marker_set[\"weight\"]\n\n self.score = (\n self.score / obj[\"denominator\"] if obj[\"denominator\"] > 0.0 else 0.0\n )\n if nump > 0:\n self.avgp /= nump\n\n def __repr__(self):\n res = f\"name: {self.name}; score: {self.score:.2f}; average marker percentage: {self.avgp:.2f}%\"\n if len(self.strong_support) > 0:\n res += \"; strong support: {0}\".format(\n \",\".join([f\"({x[0]},{x[1]})\" for x in self.strong_support])\n )\n if len(self.weak_support) > 0:\n res += \"; weak support: {0}\".format(\n \",\".join([f\"({x[0]},{x[1]})\" for x in self.weak_support])\n )\n\n return res\n\n\nclass Annotator:\n def __init__(self, markers: Dict, genes: List[str]) -> None:\n self.object = markers\n self.recalibrate(self.object, genes)\n\n def recalibrate(self, obj: dict, genes: List[str]) -> None:\n \"\"\" Remove markers that are not expressed (not in genes) and calculate partial weights for existing genes.\n \"\"\"\n for celltype in obj[\"cell_types\"]:\n denom = 0.0\n for marker_set in celltype[\"markers\"]:\n markers = marker_set[\"genes\"]\n s = len(markers)\n marker_set[\"genes\"] = [x for x in markers if x[:-1] in genes]\n new_s = len(marker_set[\"genes\"])\n marker_set[\"weight\"] = marker_set[\"weight\"] / s * new_s\n denom += marker_set[\"weight\"]\n celltype[\"denominator\"] = denom\n sub_obj = celltype.get(\"subtypes\", None)\n if sub_obj is not None:\n self.recalibrate(sub_obj, genes)\n\n def evaluate(\n self,\n de_up: pd.DataFrame,\n de_down: pd.DataFrame,\n fc_thre: float = 1.5,\n threshold: float = 0.5,\n ignore_nonde: bool = False,\n obj: dict = None,\n ):\n \"\"\" Evaluate a cluster to determine its putative cell type.\n \"\"\"\n if obj is None:\n obj = self.object\n\n results = []\n for celltype in obj[\"cell_types\"]:\n ct = CellType(celltype[\"name\"], ignore_nonde=ignore_nonde)\n ct.evaluate(celltype, de_up, de_down, fc_thre)\n if ct.score >= threshold:\n sub_obj = celltype.get(\"subtypes\", None)\n if sub_obj is not None:\n ct.subtypes = self.evaluate(\n 
de_up,\n de_down,\n fc_thre=fc_thre,\n ignore_nonde=ignore_nonde,\n obj=sub_obj,\n )\n results.append(ct)\n\n results.sort(key=lambda x: x.score, reverse=True)\n\n return results\n\n def report(\n self,\n fout: IOBase,\n ct_list: List[\"CellType\"],\n space: int = 4,\n ) -> None:\n \"\"\" Write putative cell type reports to fout.\n \"\"\"\n for ct in ct_list:\n fout.write(\" \" * space + str(ct) + \"\\n\")\n if ct.subtypes is not None:\n self.report(fout, ct.subtypes, space + 4)\n\n\ndef infer_cluster_names(\n cell_type_dict: Dict[str, List[\"CellType\"]], threshold: float = 0.5\n) -> List[str]:\n \"\"\"Decide cluster names based on cell types automatically.\n\n Parameters\n ----------\n cell_type_dict: ``Dict[str, List[\"CellType\"]]``\n Python dictionary of cell type list for each cluster. This is the output of ``pg.infer_cell_types``.\n\n threshold: ``float``, optional, default: ``0.5``\n A threshold for cell type result reported. It should be a real number between ``0.0`` and ``1.0``.\n\n Returns\n -------\n ``List[str]``\n A list of cluster names decided by their corresponding cell types. 
The order is consistent with clusters.\n\n Examples\n --------\n >>> cell_type_dict = pg.infer_cell_types(adata, markers = 'human_immune', de_test = 't')\n >>> cluster_names = pg.infer_cluster_names(cell_type_dict)\n \"\"\"\n cluster_ids = natsorted(cell_type_dict.keys())\n names = []\n name_dict = dict()\n for cluster_id in cluster_ids:\n ct_list = cell_type_dict[cluster_id]\n\n if len(ct_list) == 0 or ct_list[0].score < threshold:\n cell_name = cluster_id\n else:\n ct = ct_list[0]\n while ct.subtypes is not None and len(ct.subtypes) > 0 and ct.subtypes[0].score >= threshold:\n ct = ct.subtypes[0]\n cell_name = ct.name\n\n if cell_name in name_dict:\n name_dict[cell_name] += 1\n cell_name = cell_name + \"-\" + str(name_dict[cell_name])\n else:\n name_dict[cell_name] = 1\n\n names.append(cell_name)\n\n return names\n\n\ndef infer_cell_types(\n data: Union[MultimodalData, UnimodalData, AnnData],\n markers: Union[str, Dict],\n de_test: str = \"mwu\",\n de_alpha: float = 0.05,\n de_key: str = \"de_res\",\n threshold: float = 0.5,\n ignore_nonde: bool = False,\n output_file: str = None,\n) -> Dict[str, List[\"CellType\"]]:\n \"\"\"Infer putative cell types for each cluster using legacy markers.\n\n Parameters\n ----------\n\n data : ``MultimodalData``, ``UnimodalData``, or ``anndata.AnnData``.\n Data structure of count matrix and DE analysis results.\n\n markers : ``str`` or ``Dict``\n * If ``str``, it is a string representing a comma-separated list; each element in the list\n * either refers to a JSON file containing legacy markers, or predefined markers\n * ``'human_immune'`` for human immune cells;\n * ``'mouse_immune'`` for mouse immune cells;\n * ``'human_brain'`` for human brain cells;\n * ``'mouse_brain'`` for mouse brain cells;\n * ``'human_lung'`` for human lung cells.\n * If ``Dict``, it refers to a Python dictionary describing the markers.\n\n de_test: ``str``, optional, default: ``\"mwu\"``\n pegasus determines cell types using DE test results. 
This argument indicates which DE test result to use, can be either ``'t'``, ``'fisher'`` or ``'mwu'``.\n By default, it uses ``'mwu'``.\n\n de_alpha: ``float``, optional, default: ``0.05``\n False discovery rate for controling family-wide error.\n\n de_key : ``str``, optional, default: ``\"de_res\"``\n The keyword in ``data.varm`` that stores DE analysis results.\n\n threshold : ``float``, optional, defaut: ``0.5``\n Only report putative cell types with a score larger than or equal to ``threshold``.\n\n ignore_nonde: ``bool``, optional, default: ``False``\n Do not consider non DE genes as weak negative markers.\n\n output_file: ``str``, optional, default: ``None``\n File name of output cluster annotation. If ``None``, do not write to any file.\n\n Returns\n -------\n ``Dict[str, List[\"CellType\"]]``\n Python dictionary with cluster ID's being keys, and their corresponding cell type lists sortec by scores being values.\n\n Examples\n --------\n >>> cell_type_dict = pg.infer_cell_types(adata, markers = 'human_immune,human_brain')\n \"\"\"\n\n if output_file is not None:\n fout = open(output_file, \"w\")\n\n import pkg_resources\n predefined_markers = dict(\n human_immune=\"human_immune_cell_markers.json\",\n mouse_immune=\"mouse_immune_cell_markers.json\",\n human_brain=\"human_brain_cell_markers.json\",\n mouse_brain=\"mouse_brain_cell_markers.json\",\n human_lung=\"human_lung_cell_markers.json\",\n )\n\n if isinstance(markers, str):\n tokens = markers.split(',')\n markers = None\n for token in tokens:\n if token in predefined_markers:\n token = pkg_resources.resource_filename(\n \"pegasus.annotate_cluster\", predefined_markers[token]\n )\n with open(token) as fin:\n tmp_dict = json.load(fin)\n if markers is None:\n markers = tmp_dict\n else:\n markers[\"title\"] = f\"{markers['title']}/{tmp_dict['title']}\"\n markers[\"cell_types\"].extend(tmp_dict[\"cell_types\"])\n\n assert isinstance(markers, dict)\n anno = Annotator(markers, data.var_names)\n\n test2metric = 
{\"mwu\": \"auroc\", \"t\": \"log2FC\", \"fisher\": \"percentage_fold_change\"}\n metric = test2metric[de_test]\n thre = 0.5 if de_test == \"mwu\" else 0.0\n coln = \"percentage_fold_change\" if de_test == \"fisher\" else \"log2FC\"\n\n clusts = natsorted(\n [\n x.rpartition(\":\")[0]\n for x in data.varm[de_key].dtype.names\n if x.endswith(\":auroc\")\n ]\n )\n cell_type_results = {}\n for clust_id in clusts:\n idx = data.varm[de_key][f\"{clust_id}:{de_test}_qval\"] <= de_alpha\n\n idx_up = idx & (data.varm[de_key][f\"{clust_id}:{metric}\"] > thre)\n idx_down = idx & (data.varm[de_key][f\"{clust_id}:{metric}\"] < thre)\n assert idx_up.sum() + idx_down.sum() == idx.sum()\n\n cols = [f\"{clust_id}:{coln}\", f\"{clust_id}:percentage\"]\n de_up = pd.DataFrame(\n data=data.varm[de_key][cols][idx_up], index=data.var_names[idx_up]\n )\n de_up.rename(columns={cols[0]: \"fc\", cols[1]: \"percent\"}, inplace=True)\n de_down = pd.DataFrame(\n data=data.varm[de_key][cols][idx_down], index=data.var_names[idx_down]\n )\n de_down.rename(columns={cols[0]: \"fc\", cols[1]: \"percent\"}, inplace=True)\n\n if de_test != \"fisher\":\n de_up[\"fc\"] = 2.0 ** de_up[\"fc\"]\n de_down[\"fc\"] = 2.0 ** de_down[\"fc\"]\n\n results = anno.evaluate(de_up, de_down, threshold=threshold, ignore_nonde=ignore_nonde)\n\n if output_file is not None:\n fout.write(f\"Cluster {clust_id}:\\n\")\n anno.report(fout, results)\n\n cell_type_results[clust_id] = results\n\n if output_file is not None:\n fout.close()\n\n return cell_type_results\n\n\ndef annotate(\n data: Union[MultimodalData, UnimodalData,AnnData],\n name: str,\n based_on: str,\n anno_dict: Union[Dict[str, str], List[str]],\n) -> None:\n \"\"\"Add annotation to AnnData obj.\n\n Parameters\n ----------\n\n data : ``MultimodalData``, ``UnimodalData``, or ``anndata.AnnData``\n Gene-count matrix with DE analysis information.\n name : `str`\n Name of the new annotation in data.obs.\n based_on : `str`\n Name of the attribute the cluster ids coming 
from.\n anno_dict : `Dict[str, str]` or `List[str]`\n Dictionary mapping from cluster id to cell type.\n If it is a List, map cell types to cluster ids one to one in correspondence.\n\n Returns\n -------\n\n None\n\n Examples\n --------\n >>> pg.annotate(data, 'anno', 'spectral_louvain_labels', {'1': 'T cell', '2': 'B cell'})\n >>> pg.annotate(data, 'anno', 'louvain_labels', ['T cell', 'B cell'])\n \"\"\"\n if isinstance(anno_dict, list):\n cluster_ids = data.obs[based_on].cat.categories.values.astype('str')\n anno_dict = dict(zip(cluster_ids, anno_dict))\n\n data.obs[name] = [anno_dict[x] for x in data.obs[based_on]]\n\n@timer(logger=logger)\ndef run_annotate_cluster(\n input_file: str,\n output_file: str,\n markers: str,\n de_test: str,\n de_alpha: float = 0.05,\n de_key: str = \"de_res\",\n threshold: float = 0.5,\n ignore_nonde: bool = False,\n) -> None:\n \"\"\" For command line use.\n \"\"\"\n from pegasusio import read_input\n\n data = read_input(input_file, mode=\"r\")\n infer_cell_types(\n data,\n markers,\n de_test,\n de_alpha=de_alpha,\n de_key=de_key,\n threshold=threshold,\n ignore_nonde=ignore_nonde,\n output_file=output_file,\n )\n\n\ndef annotate_data_object(input_file: str, annotation: str) -> None:\n \"\"\" For command line use.\n annotation: anno_name:clust_name:cell_type1;...cell_typen\n \"\"\"\n from pegasusio import read_input, write_output\n\n data = read_input(input_file, mode=\"r\")\n anno_name, clust_name, anno_str = annotation.split(\":\")\n anno_dict = {str(i + 1): x for i, x in enumerate(anno_str.split(\";\"))}\n annotate(data, anno_name, clust_name, anno_dict)\n write_output(data, input_file)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
csdenboer/hummingbot
[ "8a799675a325ebdbb74d76b2a44472cdbf74d691" ]
[ "hummingbot/connector/exchange/radar_relay/radar_relay_api_order_book_data_source.py" ]
[ "#!/usr/bin/env python\n\nimport asyncio\nimport aiohttp\nimport logging\nimport pandas as pd\nfrom typing import (\n AsyncIterable,\n Dict,\n List,\n Optional,\n)\nimport re\nimport time\nimport ujson\nimport websockets\nfrom websockets.exceptions import ConnectionClosed\n\nfrom hummingbot.core.data_type.order_book import OrderBook\nfrom hummingbot.connector.exchange.radar_relay.radar_relay_order_book import RadarRelayOrderBook\nfrom hummingbot.connector.exchange.radar_relay.radar_relay_active_order_tracker import RadarRelayActiveOrderTracker\nfrom hummingbot.connector.exchange.radar_relay.radar_relay_order_book_message import RadarRelayOrderBookMessage\nfrom hummingbot.core.utils import async_ttl_cache\nfrom hummingbot.logger import HummingbotLogger\nfrom hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource\nfrom hummingbot.core.data_type.order_book_message import OrderBookMessage\n\nTRADING_PAIR_FILTER = re.compile(r\"(WETH|DAI)$\")\n\nREST_BASE_URL = \"https://api.radarrelay.com/v3\"\nTOKENS_URL = f\"{REST_BASE_URL}/tokens\"\nMARKETS_URL = f\"{REST_BASE_URL}/markets\"\nWS_URL = \"wss://ws.radarrelay.com/v3\"\n\n\nclass RadarRelayAPIOrderBookDataSource(OrderBookTrackerDataSource):\n\n MESSAGE_TIMEOUT = 30.0\n PING_TIMEOUT = 10.0\n\n _rraobds_logger: Optional[HummingbotLogger] = None\n _client: Optional[aiohttp.ClientSession] = None\n\n @classmethod\n def logger(cls) -> HummingbotLogger:\n if cls._rraobds_logger is None:\n cls._rraobds_logger = logging.getLogger(__name__)\n return cls._rraobds_logger\n\n def __init__(self, trading_pairs: List[str]):\n super().__init__(trading_pairs)\n self.order_book_create_function = lambda: RadarRelayOrderBook()\n\n @classmethod\n def http_client(cls) -> aiohttp.ClientSession:\n if cls._client is None:\n if not asyncio.get_event_loop().is_running():\n raise EnvironmentError(\"Event loop must be running to start HTTP client session.\")\n cls._client = aiohttp.ClientSession()\n return 
cls._client\n\n @classmethod\n async def get_all_token_info(cls) -> Dict[str, any]:\n \"\"\"\n Returns all token information\n \"\"\"\n client: aiohttp.ClientSession = cls.http_client()\n async with client.get(TOKENS_URL) as response:\n response: aiohttp.ClientResponse = response\n if response.status != 200:\n raise IOError(f\"Error fetching token info. HTTP status is {response.status}.\")\n data = await response.json()\n return {d[\"address\"]: d for d in data}\n\n @classmethod\n @async_ttl_cache(ttl=60 * 30, maxsize=1)\n async def get_active_exchange_markets(cls) -> pd.DataFrame:\n \"\"\"\n Returned data frame should have trading pair as index and include usd volume, baseAsset and quoteAsset\n \"\"\"\n client: aiohttp.ClientSession = cls.http_client()\n async with client.get(f\"{MARKETS_URL}?include=ticker,stats\") as response:\n response: aiohttp.ClientResponse = response\n if response.status != 200:\n raise IOError(f\"Error fetching active Radar Relay markets. HTTP status is {response.status}.\")\n data = await response.json()\n data: List[Dict[str, any]] = [\n {**item, **{\"baseAsset\": item[\"id\"].split(\"-\")[0], \"quoteAsset\": item[\"id\"].split(\"-\")[1]}}\n for item in data\n ]\n all_markets: pd.DataFrame = pd.DataFrame.from_records(data=data, index=\"id\")\n\n quote_volume: List[float] = []\n for row in all_markets.itertuples():\n base_volume: float = float(row.stats[\"volume24Hour\"])\n quote_volume.append(base_volume)\n\n all_markets.loc[:, \"volume\"] = quote_volume\n return all_markets.sort_values(\"USDVolume\", ascending=False)\n\n @staticmethod\n async def fetch_trading_pairs() -> List[str]:\n try:\n trading_pairs = set()\n page_count = 1\n while True:\n async with aiohttp.ClientSession() as client:\n async with client.get(f\"{MARKETS_URL}?perPage=100&page={page_count}\", timeout=10) \\\n as response:\n if response.status == 200:\n markets = await response.json()\n new_trading_pairs = set(map(lambda details: details.get('id'), markets))\n if 
len(new_trading_pairs) == 0:\n break\n else:\n trading_pairs = trading_pairs.union(new_trading_pairs)\n page_count += 1\n trading_pair_list: List[str] = []\n for raw_trading_pair in trading_pairs:\n trading_pair_list.append(raw_trading_pair)\n return trading_pair_list\n else:\n break\n except Exception:\n # Do nothing if the request fails -- there will be no autocomplete for radar trading pairs\n pass\n\n return []\n\n @staticmethod\n async def get_snapshot(client: aiohttp.ClientSession, trading_pair: str) -> Dict[str, any]:\n async with client.get(f\"{REST_BASE_URL}/markets/{trading_pair}/book\") as response:\n response: aiohttp.ClientResponse = response\n if response.status != 200:\n raise IOError(f\"Error fetching Radar Relay market snapshot for {trading_pair}. \"\n f\"HTTP status is {response.status}.\")\n return await response.json()\n\n async def get_new_order_book(self, trading_pair: str) -> OrderBook:\n async with aiohttp.ClientSession() as client:\n snapshot: Dict[str, any] = await self.get_snapshot(client, trading_pair)\n snapshot_timestamp: float = time.time()\n snapshot_msg: RadarRelayOrderBookMessage = RadarRelayOrderBook.snapshot_message_from_exchange(\n snapshot,\n snapshot_timestamp,\n metadata={\"trading_pair\": trading_pair}\n )\n\n radar_relay_order_book: OrderBook = self.order_book_create_function()\n radar_relay_active_order_tracker: RadarRelayActiveOrderTracker = RadarRelayActiveOrderTracker()\n bids, asks = radar_relay_active_order_tracker.convert_snapshot_message_to_order_book_row(\n snapshot_msg)\n radar_relay_order_book.apply_snapshot(bids, asks, snapshot_msg.update_id)\n return radar_relay_order_book\n\n async def _inner_messages(self,\n ws: websockets.WebSocketClientProtocol) -> AsyncIterable[str]:\n # Terminate the recv() loop as soon as the next message timed out, so the outer loop can reconnect.\n try:\n while True:\n try:\n msg: str = await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT)\n yield msg\n except 
asyncio.TimeoutError:\n try:\n pong_waiter = await ws.ping()\n await asyncio.wait_for(pong_waiter, timeout=self.PING_TIMEOUT)\n except asyncio.TimeoutError:\n raise\n except asyncio.TimeoutError:\n self.logger().warning(\"WebSocket ping timed out. Going to reconnect...\")\n return\n except ConnectionClosed:\n return\n finally:\n await ws.close()\n\n async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):\n # Trade messages are received from the order book web socket\n pass\n\n async def listen_for_order_book_diffs(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):\n while True:\n try:\n trading_pairs: List[str] = self._trading_pairs\n async with websockets.connect(WS_URL) as ws:\n ws: websockets.WebSocketClientProtocol = ws\n for trading_pair in trading_pairs:\n request: Dict[str, str] = {\n \"type\": \"SUBSCRIBE\",\n \"topic\": \"BOOK\",\n \"market\": trading_pair\n }\n await ws.send(ujson.dumps(request))\n async for raw_msg in self._inner_messages(ws):\n msg = ujson.loads(raw_msg)\n # Valid Diff messages from RadarRelay have action key\n if \"action\" in msg:\n diff_msg: RadarRelayOrderBookMessage = RadarRelayOrderBook.diff_message_from_exchange(\n msg, time.time())\n output.put_nowait(diff_msg)\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unexpected error with WebSocket connection. 
Retrying after 30 seconds...\",\n exc_info=True)\n await asyncio.sleep(30.0)\n\n async def listen_for_order_book_snapshots(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):\n while True:\n try:\n trading_pairs: List[str] = self._trading_pairs\n client: aiohttp.ClientSession = self.http_client()\n for trading_pair in trading_pairs:\n try:\n snapshot: Dict[str, any] = await self.get_snapshot(client, trading_pair)\n snapshot_timestamp: float = time.time()\n snapshot_msg: OrderBookMessage = RadarRelayOrderBook.snapshot_message_from_exchange(\n snapshot,\n snapshot_timestamp,\n metadata={\"trading_pair\": trading_pair}\n )\n output.put_nowait(snapshot_msg)\n self.logger().debug(f\"Saved order book snapshot for {trading_pair}\")\n\n await asyncio.sleep(5.0)\n\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unexpected error.\", exc_info=True)\n await asyncio.sleep(5.0)\n this_hour: pd.Timestamp = pd.Timestamp.utcnow().replace(minute=0, second=0, microsecond=0)\n next_hour: pd.Timestamp = this_hour + pd.Timedelta(hours=1)\n delta: float = next_hour.timestamp() - time.time()\n await asyncio.sleep(delta)\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unexpected error.\", exc_info=True)\n await asyncio.sleep(5.0)\n" ]
[ [ "pandas.DataFrame.from_records", "pandas.Timedelta", "pandas.Timestamp.utcnow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ElnazP/MonoAlg3D_C
[ "3e81952771e8747f8fb713c31225b50117c61a2d" ]
[ "scripts/elnaz/plot_comparison_potential.py" ]
[ "import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef read_transmembrane_potential(input_file, dt, print_rate):\n data = np.genfromtxt(input_file)\n n = len(data)\n ms_each_step = dt*print_rate\n\n end_simulation = n / ms_each_step\n\n timesteps = np.arange(0,n)*ms_each_step\n vms = data\n\n return timesteps, vms\n\n\ndef plot_transmembrane_potential(t1, v1, t2, v2):\n\n plt.plot(t1, v1, label=\"Pass\", c=\"green\", linewidth=1.0)\n plt.plot(t2, v2, label=\"Block\", c=\"red\", linewidth=1.0)\n\n plt.grid()\n plt.xlabel(\"t (ms)\",fontsize=15)\n plt.ylabel(\"V (mV)\",fontsize=15)\n plt.title(\"Action potential\",fontsize=14)\n plt.legend(loc=0,fontsize=14)\n #plt.savefig(\"output/comparison-aps.pdf\")\n plt.show()\n\n\ndef main():\n\t\n if len(sys.argv) != 5:\n print(\"-------------------------------------------------------------------------\")\n print(\"Usage:> python %s <input_file_1> <input_file_2> <dt> <print_rate>\" % sys.argv[0])\n print(\"-------------------------------------------------------------------------\")\n print(\"<input_file_1> = Input file with the AP from the first simulation\")\n print(\"<input_file_2> = Input file with the AP from the second simulation\")\n print(\"<dt> = Timestep value used for the simulation\")\n print(\"<print_rate> = Print rate used for the simulation\")\n print(\"-------------------------------------------------------------------------\")\n return 1\n\n input_file_1 = sys.argv[1]\n input_file_2 = sys.argv[2]\n dt = float(sys.argv[3])\n print_rate = int(sys.argv[4])\n\n t1, vm1 = read_transmembrane_potential(input_file_1,dt,print_rate)\n t2, vm2 = read_transmembrane_potential(input_file_2,dt,print_rate)\n\n plot_transmembrane_potential(t1,vm1,t2,vm2)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "numpy.arange", "numpy.genfromtxt", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
predict-drone/drone-control
[ "2054ccec2f5e6c002727a5561b494a1046484504" ]
[ "simulator/ply.py" ]
[ "# MIT LICENSE\r\n# https://github.com/bponsler/kinectToPly/blob/master/kinectToPly.py\r\nimport numpy as np\r\n\r\n\r\nclass Ply(object):\r\n '''The Ply class provides the ability to write a point cloud represented\r\n by two arrays: an array of points (num points, 3), and an array of colors\r\n (num points, 3) to a PLY file.\r\n '''\r\n\r\n def __init__(self, points, colors):\r\n '''\r\n * points -- The matrix of points (num points, 3)\r\n * colors -- The matrix of colors (num points, 3)\r\n '''\r\n self.__points = points\r\n self.__colors = colors\r\n\r\n def write(self, filename):\r\n '''Write the point cloud data to a PLY file of the given name.\r\n * filename -- The PLY file\r\n '''\r\n # Write the headers\r\n lines = self.__getLinesForHeader()\r\n\r\n fd = open(filename, \"w\")\r\n for line in lines:\r\n fd.write(\"%s\\n\" % line)\r\n\r\n # Write the points\r\n self.__writePoints(fd, self.__points, self.__colors)\r\n\r\n fd.close()\r\n\r\n def __getLinesForHeader(self):\r\n '''Get the list of lines for the PLY header.'''\r\n lines = [\r\n \"ply\",\r\n \"format ascii 1.0\",\r\n \"comment generated by: kinectToPly\",\r\n \"element vertex %s\" % len(self.__points),\r\n \"property float x\",\r\n \"property float y\",\r\n \"property float z\",\r\n \"property uchar red\",\r\n \"property uchar green\",\r\n \"property uchar blue\",\r\n \"end_header\",\r\n ]\r\n\r\n return lines\r\n\r\n def __writePoints(self, fd, points, colors):\r\n '''Write the point cloud points to a file.\r\n * fd -- The file descriptor\r\n * points -- The matrix of points (num points, 3)\r\n * colors -- The matrix of colors (num points, 3)\r\n '''\r\n # Stack the two arrays together\r\n stacked = np.column_stack((points, colors))\r\n\r\n # Write the array to the file\r\n np.savetxt(\r\n fd,\r\n stacked,\r\n delimiter='\\n',\r\n fmt=\"%f %f %f %d %d %d\")\r\n\r\n\r\ndef write_xyz_rgb_as_ply(point_cloud, rgb_image, path):\r\n \"\"\"Write a point cloud with associated rgb image to a ply 
file\r\n\r\n # Arguments\r\n\r\n point_cloud: xyz point cloud in format [height, width, channels]\r\n rgb_image: uint8 image in format [height, width, channels]\r\n path: Where to save the file, ex: '/path/to/folder/file.ply'\r\n \"\"\"\r\n xyz = point_cloud.reshape([point_cloud.size/3, 3])\r\n rgb = np.squeeze(rgb_image).reshape([point_cloud.size/3, 3])\r\n ply = Ply(xyz, rgb)\r\n ply.write(path)\r\n" ]
[ [ "numpy.savetxt", "numpy.squeeze", "numpy.column_stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Vakihito/multilogue-net
[ "fd67c78b4b6797c0e2aeae858023d4b67703d62d" ]
[ "train_regression.py" ]
[ "import numpy as np, torch, torch.nn as nn, torch.optim as optim\r\nimport argparse, time, pandas as pd, os\r\nfrom tqdm import tqdm\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.utils.data.sampler import SubsetRandomSampler\r\nfrom sklearn.metrics import mean_absolute_error\r\nfrom scipy.stats import pearsonr\r\nfrom model import RegressionModel, MaskedMSELoss, BiModalAttention\r\nfrom dataloader import MOSEIRegression\r\n\r\nnp.random.seed(393)\r\ntorch.cuda.device([0])\r\n\r\ndef get_train_valid_sampler(trainset, valid=0.1):\r\n size = len(trainset)\r\n idx = range(size)\r\n split = int(valid*size)\r\n return SubsetRandomSampler(idx[split:]), SubsetRandomSampler(idx[:split])\r\n\r\ndef get_MOSEI_loaders(path, batch_size=128, valid=0.1, num_workers=0, pin_memory=False):\r\n trainset = MOSEIRegression(path=path)\r\n train_sampler, valid_sampler = get_train_valid_sampler(trainset, valid)\r\n train_loader = DataLoader(trainset, batch_size=batch_size, sampler=train_sampler, collate_fn=trainset.collate_fn, num_workers=num_workers, pin_memory=pin_memory)\r\n valid_loader = DataLoader(trainset, batch_size=batch_size, sampler=valid_sampler, collate_fn=trainset.collate_fn, num_workers=num_workers, pin_memory=pin_memory)\r\n testset = MOSEIRegression(path=path, train=False)\r\n test_loader = DataLoader(testset, batch_size=batch_size, collate_fn=testset.collate_fn, num_workers=num_workers, pin_memory=pin_memory)\r\n return train_loader, valid_loader, test_loader\r\n\r\ndef train_or_eval_model(model, loss_function, dataloader, epoch, optimizer=None, train=False): \r\n losses, preds, labels, masks = [], [], [], []\r\n assert not train or optimizer!=None\r\n if train:\r\n model.train()\r\n else:\r\n model.eval()\r\n for data in dataloader:\r\n if train:\r\n optimizer.zero_grad()\r\n textf, visuf, acouf, qmask, umask, label = [d.cuda() for d in data] if cuda else data\r\n pred = model(textf, acouf, visuf, textf, 
qmask, umask) \r\n labels_ = label.view(-1) \r\n umask_ = umask.view(-1) \r\n loss = loss_function(pred, labels_, umask_)\r\n preds.append(pred.data.cpu().numpy())\r\n labels.append(labels_.data.cpu().numpy())\r\n masks.append(umask_.cpu().numpy())\r\n losses.append(loss.item()*masks[-1].sum())\r\n if train:\r\n loss.backward()\r\n optimizer.step()\r\n if preds!=[]:\r\n preds = np.concatenate(preds)\r\n labels = np.concatenate(labels)\r\n masks = np.concatenate(masks)\r\n else:\r\n return float('nan'), float('nan'), float('nan'), [], [], []\r\n avg_loss = round(np.sum(losses)/np.sum(masks),4)\r\n mae = round(mean_absolute_error(labels,preds,sample_weight=masks),4)\r\n pred_lab = pd.DataFrame(list(filter(lambda x: x[2]==1, zip(labels, preds, masks))))\r\n pear = round(pearsonr(pred_lab[0], pred_lab[1])[0], 4)\r\n return avg_loss, mae, pear, labels, preds, masks\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(\"Trains a regression model for sentiment data with output ranging from -3 to +3 indicating sentiment\")\r\n parser.add_argument('--no-cuda', action='store_true', default=False, help='does not use GPU')\r\n parser.add_argument('--lr', type=float, default=0.0001, metavar='LR', help='learning rate')\r\n parser.add_argument('--l2', type=float, default=0.0001, metavar='L2', help='L2 regularization weight')\r\n parser.add_argument('--rec-dropout', type=float, default=0.1, metavar='rec_dropout', help='rec_dropout rate')\r\n parser.add_argument('--dropout', type=float, default=0.25, metavar='dropout', help='dropout rate')\r\n parser.add_argument('--batch-size', type=int, default=128, metavar='BS', help='batch size')\r\n parser.add_argument('--epochs', type=int, default=100, metavar='E', help='number of epochs')\r\n parser.add_argument('--log_dir', type=str, default='logs/mosei_regression', help='Directory for tensorboard logs')\r\n args = parser.parse_args()\r\n os.makedirs(args.log_dir, exist_ok = True)\r\n writer = 
SummaryWriter(args.log_dir)\r\n print(args)\r\n\r\n # Run on either GPU or CPU\r\n args.cuda = torch.cuda.is_available() and not args.no_cuda\r\n if args.cuda:\r\n print('Running on GPU')\r\n else:\r\n print('Running on CPU')\r\n print(\"Tensorboard logs in \" + args.log_dir)\r\n\r\n batch_size = args.batch_size\r\n n_classes = 6\r\n cuda = args.cuda\r\n n_epochs = args.epochs\r\n D_m_text, D_m_audio, D_m_video, D_m_context = 300, 384, 35, 300\r\n D_g, D_p, D_e, D_h, D_a = 150, 150, 100, 100, 100\r\n\r\n # Instantiate model\r\n model = RegressionModel(D_m_text, D_m_audio, D_m_video, D_m_context, D_g, D_p, D_e, D_h, dropout_rec=args.rec_dropout, dropout=args.dropout)\r\n\r\n if cuda:\r\n model.cuda()\r\n loss_function = MaskedMSELoss()\r\n\r\n # Get optimizer and relevant dataloaders\r\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)\r\n train_loader, valid_loader, test_loader = get_MOSEI_loaders('./data/regression.pkl', valid=0.0, batch_size=batch_size, num_workers=0)\r\n best_loss, best_label, best_pred, best_mask, best_pear = None, None, None, None, None\r\n\r\n # Training loop\r\n for e in tqdm(range(n_epochs), desc = 'MOSEI Regression'):\r\n train_loss, train_mae, train_pear,_,_,_ = train_or_eval_model(model, loss_function, train_loader, e, optimizer, True)\r\n test_loss, test_mae, test_pear, test_label, test_pred, test_mask = train_or_eval_model(model, loss_function, test_loader, e)\r\n writer.add_scalar(\"Train Loss - MOSEI Regression\", train_loss, e)\r\n writer.add_scalar(\"Test Loss - MOSEI Regression\", test_loss, e)\r\n writer.add_scalar(\"Train MAE - MOSEI Regression\", train_mae, e)\r\n writer.add_scalar(\"Test MAE - MOSEI Regression\", test_mae, e)\r\n writer.add_scalar(\"Train Pearson - MOSEI Regression\", train_pear, e)\r\n writer.add_scalar(\"Test Pearson - MOSEI Regression\", test_pear, e)\r\n if best_loss == None or best_loss > test_loss:\r\n best_loss, best_label, best_pred, best_mask, best_pear =\\\r\n test_loss, 
test_label, test_pred, test_mask, test_pear\r\n\r\n print('Test performance..')\r\n print('Loss {} MAE {} r {}'.format(best_loss, round(mean_absolute_error(best_label,best_pred,sample_weight=best_mask),4), best_pear))" ]
[ [ "numpy.random.seed", "sklearn.metrics.mean_absolute_error", "scipy.stats.pearsonr", "torch.utils.data.DataLoader", "torch.utils.data.sampler.SubsetRandomSampler", "numpy.concatenate", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "torch.cuda.device", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
oesst/Sound_Analytics
[ "4008c43b82e0b991bb3b37d8e61d63208fd67a69" ]
[ "crosscorrelation_tests.py" ]
[ "import wave\nimport numpy as np\nimport struct\nimport soundfile as sf\nimport matplotlib.pyplot as plt\n\nsignal_r ='/home/oesst/ownCloud/PhD/Sounds/sinus_500Hz_wn_bg.wav'\nsignal_l = '/home/oesst/ownCloud/PhD/Sounds/sinus_500Hz_different_wn_bg_quarter_phase_shifted.wav'\n\n\ndef gcc(a, b,max_delay = 0):\n a_fft = np.fft.fft(a)\n b_fft = np.fft.fft(b)\n\n b_conj = b_fft.conj()\n\n nom = a_fft * b_conj\n\n denom = abs(nom)\n\n gphat = np.fft.ifft(nom / denom)\n\n delay = np.argmax(gphat)\n\n if max_delay:\n\n if delay > (len(a) / 2):\n delay = np.argmax(np.flip(gphat,0)[0:max_delay])\n delay =-delay\n else:\n delay = np.argmax(gphat[0:max_delay])\n\n return delay, gphat\n\n\ndef cross_correlation_using_fft( x, y):\n from numpy.fft import fft, ifft, fftshift\n\n f1 = fft(x)\n f2 = fft(np.flipud(y))\n cc = np.real(ifft(f1 * f2))\n return fftshift(cc)\n\n\ndef find_delay( a, b, max_delay=0):\n # very accurate but not so fast as gcc\n # from scipy.signal import correlate\n # corr = correlate(a, b)\n # corr = np.correlate(a,b,'full')\n corr = cross_correlation_using_fft(a, b)\n # check only lags that are in range -max_delay and max_delay\n # print(corr)\n if max_delay:\n middle = np.int(np.ceil(len(corr) / 2))\n new_corr = np.zeros(len(corr))\n new_corr[middle - max_delay:middle + max_delay] = corr[middle - max_delay:middle + max_delay]\n lag = np.argmax(np.abs(new_corr)) - np.floor(len(new_corr) / 2)\n else:\n lag = np.argmax(np.abs(corr)) - np.floor(len(corr) / 2)\n\n return lag\n\n\n\ndef gcc_phat(sig, refsig, fs=1, max_tau=None, interp=16):\n '''\n This function computes the offset between the signal sig and the reference signal refsig\n using the Generalized Cross Correlation - Phase Transform (GCC-PHAT)method.\n '''\n\n # make sure the length for the FFT is larger or equal than len(sig) + len(refsig)\n n = sig.shape[0] + refsig.shape[0]\n\n # Generalized Cross Correlation Phase Transform\n SIG = np.fft.rfft(sig, n=n)\n REFSIG = np.fft.rfft(refsig, n=n)\n R = 
SIG * np.conj(REFSIG)\n\n cc = np.fft.irfft(R / np.abs(R), n=(interp * n))\n\n max_shift = int(interp * n / 2)\n if max_tau:\n max_shift = np.minimum(int(interp * fs * max_tau), max_shift)\n\n cc = np.concatenate((cc[-max_shift:], cc[:max_shift + 1]))\n\n # find max cross correlation index\n shift = np.argmax(np.abs(cc)) - max_shift\n\n tau = shift / float(interp * fs)\n\n return tau, cc\n\n\ndata_r= sf.read(signal_r)\ndata_l= sf.read(signal_l)\n\ndata_r = data_r[0]\ndata_l = data_l[0]\n\n\n\n# delay,gphat = gcc_phat(data_r,data_l,fs=44100)\n#\n# print(delay/44100 *1000)\n#\n# delay_1,gphat_1 = gcc(data_r,data_l,352)\n#\n# print(delay_1)\n# print(delay_1/44100*1000)\n\ndelay_2 = find_delay(data_r,data_l,50)\n\n\nprint(delay_2)\nprint(delay_2/44100*1000)\n\n\n\n\n\n\n\n# [xr,lag]=xcorr(signal_l,signal_r);\n#\n# [mx,mind]=max(abs(xr));\n#\n# delay_eva=lag(mind)\n\n\n# print((len(tmp) / 2.0 - lag)/44100 *1000)\n#\n#print(delay)\n# print(delay_1)\n#\nplt.plot(data_r[1:20000])\nplt.plot(data_l[1:20000])\nplt.show()\n" ]
[ [ "numpy.conj", "numpy.fft.fft", "numpy.fft.rfft", "numpy.abs", "numpy.flipud", "numpy.fft.fftshift", "matplotlib.pyplot.plot", "numpy.fft.ifft", "numpy.concatenate", "numpy.argmax", "matplotlib.pyplot.show", "numpy.flip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JCH222/dfqueue
[ "890606e41e78d7bf8eaca96683b5a3cfaa7d2813" ]
[ "dfqueue/tests/scenarios/__init__.py" ]
[ "# coding: utf8\n\nfrom typing import Tuple, Dict, NoReturn, List, Any\nfrom pandas import DataFrame, Series\n\n\ndef create_queue_item(result: tuple, selected_columns: List[Any]) -> Tuple[str, Dict]:\n return [(result[0], {column: result[1][column] for column in selected_columns})]\n\n\ndef create_queue_items(results: List[tuple], selected_columns: List[Any]) -> List[Tuple[str, Dict]]:\n return [(result[0],\n {column: result[1][column] for column in selected_columns}) for result in results]\n\n\ndef add_row(dataframe: DataFrame, index: str, columns_dict: dict) -> Tuple[str, Dict]:\n dataframe.at[index] = Series(data=columns_dict)\n return index, columns_dict\n\n\ndef change_row_value(dataframe: DataFrame, index: str, new_columns_dict: dict) -> Tuple[str, Dict]:\n dataframe.at[index] = Series(data=new_columns_dict)\n return index, new_columns_dict\n\n\ndef remove_row(dataframe: DataFrame, index: str) -> NoReturn:\n dataframe.drop([index], inplace=True)\n" ]
[ [ "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Tnutlyrehc/MachineLearningExam
[ "29dfda9c814290288955a5f504c924f45f32f291" ]
[ "plain_cnn.py" ]
[ "import numpy as np\nimport pandas as pd\nimport os\nimport re\nimport tensorflow as tf\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport keras\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder\nfrom keras.preprocessing import image\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\nfrom tensorflow.keras import regularizers\n\nfrom load_data import labels, X_train, X_test, X_val, y_train, y_test, y_val\n\n# Defining the model for CC\ninputs = Input(shape = (84,150, 3))\ny = Conv2D(32, 5, activation='relu')(inputs)\ny = MaxPool2D(pool_size=(2,2), strides=(2,2))(y)\ny = Conv2D(64, 5, activation='relu')(y)\ny = MaxPool2D(pool_size=(2,2), strides=(2,2))(y)\n\nx = keras.layers.Flatten()(y)\nx = Dense(128, activation= 'relu')(x)\noutputs = Dense(1, activation='sigmoid')(x)\n\nConvMod_CC = Model(inputs, outputs)\nConvMod_CC.summary()\nConvMod_CC.compile( optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\nCC_fit = ConvMod_CC.fit(X_train, labels.CC[y_train], epochs=25, batch_size=32, validation_data= (X_val, labels.CC[y_val]))\nConvMod_CC.save('models/plain_CC.h5')\nnp.save('plain_CC_training.npy', CC_fit.history)\n\n# Defining the model for D\ninputs = Input(shape = (84,150, 3))\ny = Conv2D(32, 5, activation='relu')(inputs)\ny = MaxPool2D(pool_size=(2,2), strides=(2,2))(y)\ny = Conv2D(64, 5, activation='relu')(y)\ny = MaxPool2D(pool_size=(2,2), strides=(2,2))(y)\n\nx = keras.layers.Flatten()(y)\nx = Dense(128, activation= 'relu')(x)\noutputs = Dense(5, activation='softmax')(x)\n\nConvMod_D = Model(inputs, outputs)\nConvMod_D.summary()\n# one hot encode labels for categorical_crossentropy loss\ncat = OneHotEncoder()\none_hot_D_train = cat.fit_transform(np.array(labels.D[y_train]).reshape(-1, 1)).toarray()\nprint(one_hot_D_train.shape)\none_hot_D_val = 
cat.fit_transform(np.array(labels.D[y_val]).reshape(-1, 1)).toarray()\nConvMod_D.compile( optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nD_fit = ConvMod_D.fit(X_train,one_hot_D_train, epochs=25, batch_size=32, validation_data= (X_val, one_hot_D_val))\nConvMod_D.save('models/plain_D.h5')\nnp.save('plain_D_training.npy', D_fit.history)\n\n# Defining the model for Y\ninputs = Input(shape = (84,150, 3))\ny = Conv2D(32, 5, activation='relu')(inputs)\ny = MaxPool2D(pool_size=(2,2), strides=(2,2))(y)\ny = Conv2D(64, 5, activation='relu')(y)\ny = MaxPool2D(pool_size=(2,2), strides=(2,2))(y)\n\nx = keras.layers.Flatten()(y)\nx = Dense(128, activation= 'relu')(x)\noutputs = Dense(11, activation='softmax')(x)\n\nConvMod_Y = Model(inputs, outputs)\nConvMod_Y.summary()\nConvMod_Y.compile( optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nY_fit = ConvMod_Y.fit(X_train, labels.Y[y_train], epochs=25, batch_size=32, validation_data= (X_val, labels.Y[y_val]))\nConvMod_Y.save('models/plain_Y.h5')\nnp.save('plain_Y_training.npy', Y_fit.history)" ]
[ [ "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "sklearn.preprocessing.OneHotEncoder", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.MaxPool2D", "numpy.save", "numpy.array", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
DiMorten/FCN_ConvLSTM_Crop_Recognition_Generalized
[ "2749c90fab6c3854c380f6bca945dd4e99c17239" ]
[ "networks/convlstm_networks/train_src/model_input_mode.py" ]
[ "import deb\nimport numpy as np\nimport pdb\nclass ModelInputMode():\n pass\nclass MIMFixed(ModelInputMode):\n def __init__(self):\n pass\n def batchTrainPreprocess(self, batch, data, label_date_id, batch_seq_len=12):\n input_ = batch['in'].astype(np.float16)\n input_ = data.addDoty(input_)\n return input_\n def batchValPreprocess(self, batch, data):\n input_ = ['in'].astype(np.float16)\n input_ = data.addDoty(input_)\n return input_\n\n def trainingInit(self,batch,data,t_len, model_t_len):\n batch['train']['shape'] = (batch['train']['size'], model_t_len) + data.patches['train']['in'].shape[2:]\n batch['val']['shape'] = (batch['val']['size'], model_t_len) + data.patches['val']['in'].shape[2:]\n batch['test']['shape'] = (batch['test']['size'], model_t_len) + data.patches['test']['in'].shape[2:]\n\n deb.prints(batch['train']['shape'])\n data.ds.dotyReplicateSamples()\n #data.labeled_dates = 12\n## deb.prints(data.labeled_dates)\n# min_seq_len = t_len - data.labeled_dates + 1 # 20 - 12 + 1 = 9\n# deb.prints(min_seq_len)\n\n return batch, data, None\n\n def valLabelSelect(self, data, label_id = -1):\n return data\nclass MIMFixedLabelSeq(MIMFixed):\n def __init__(self):\n pass\n def batchTrainPreprocess(self, batch, data, label_date_id, batch_seq_len=12):\n input_ = batch['train']['in'][:,:label_date_id].astype(np.float16)\n input_ = data.addDoty(input_)\n return input_\n def batchValPreprocess(self, batch, data):\n input_ = batch['val']['in'].astype(np.float16)\n input_ = data.addDoty(input_)\n return input_\n\nclass MIMVariable(ModelInputMode):\n\n def trainingInit(self,batch,data,t_len, model_t_len):\n batch['train']['shape'] = (batch['train']['size'], model_t_len) + data.patches['train']['in'].shape[2:]\n batch['val']['shape'] = (batch['val']['size'], model_t_len) + data.patches['val']['in'].shape[2:]\n batch['test']['shape'] = (batch['test']['size'], model_t_len) + data.patches['test']['in'].shape[2:]\n\n deb.prints(batch['train']['shape'])\n #data.labeled_dates 
= 12\n deb.prints(data.labeled_dates)\n min_seq_len = t_len - data.labeled_dates + 1 # 20 - 12 + 1 = 9\n deb.prints(min_seq_len)\n data.ds.setDotyFlag(True)\n return batch, data, min_seq_len\n def valLabelSelect(self, data, label_id = -1):\n \n data.patches['val']['label'] = data.patches['val']['label'][:, label_id]\n data.patches['test']['label'] = data.patches['test']['label'][:, label_id]\n deb.prints(data.patches['val']['label'].shape)\n\n deb.prints(data.patches['test']['label'].shape)\n return data\n\nclass MIMVarLabel(MIMVariable):\n def __init__(self):\n self.batch_seq_len = 12\n pass\n def batchTrainPreprocess(self, batch, data, label_date_id, batch_seq_len=12):\n \n #print(\"Label, seq start, seq end\",label_date_id,label_date_id-batch_seq_len+1,label_date_id+1)\n if label_date_id+1!=0:\n\n input_ = batch['in'][:, label_date_id-batch_seq_len+1:label_date_id+1]\n else:\n input_ = batch['in'][:, label_date_id-batch_seq_len+1:]\n #print(\"exception\", input_.shape)\n #print(input_.shape)\n #print(label_date_id, batch_seq_len, label_date_id-batch_seq_len+1, label_date_id+1)\n #pdb.set_trace()\n input_ = input_.astype(np.float16)\n input_ = data.addDoty(input_, \n bounds = [label_date_id-batch_seq_len+1, label_date_id+1])\n return input_\n def batchMetricSplitPreprocess(self, batch, data, label_date_id, batch_seq_len=12):\n return batchTrainPreprocess(batch, data, label_date_id, batch_seq_len)\n\nclass MIMFixedLabelAllLabels(MIMVarLabel):\n\n def valLabelSelect(self, data, label_id = -1):\n return data\n\nclass MIMVarLabel_PaddedSeq(MIMVarLabel):\n def batchTrainPreprocess(self, batch, ds, label_date_id, batch_seq_len=None):\n sample_n = batch['in'].shape[0]\n #print(\"Label, seq start, seq end\",label_date_id,label_date_id-batch_seq_len+1,label_date_id+1)\n if label_date_id+1!=0:\n if label_date_id in ds.padded_dates:\n unpadded_input = batch['in'][:, :label_date_id+1]\n len_input_seq = unpadded_input.shape[1]\n #deb.prints(len_input_seq)\n input_ = 
np.zeros(batch['shape']).astype(np.float16)\n input_[:, -len_input_seq:] = unpadded_input\n else:\n #print(batch['in'].shape,label_date_id-self.batch_seq_len+1,label_date_id+1)\n input_ = batch['in'][:, label_date_id-self.batch_seq_len+1:label_date_id+1]\n ##print(input_.shape)\n\n else:\n #print(batch['in'].shape,label_date_id-self.batch_seq_len+1,label_date_id+1)\n input_ = batch['in'][:, label_date_id-self.batch_seq_len+1:]\n ##print(input_.shape)\n\n #print(\"exception\", input_.shape)\n input_ = input_.astype(np.float16)\n input_ = ds.addDotyPadded(input_, \n bounds = [label_date_id-self.batch_seq_len+1, label_date_id+1], \n seq_len = self.batch_seq_len,\n sample_n = sample_n)\n #print(len(input_), input_[0].shape, input_[1].shape)\n \n return input_\n def batchMetricSplitPreprocess(self, batch, data, split='val'):\n input_ = batch[split]['in'][:,-12:].astype(np.float16)\n input_ = data.addDoty(input_, bounds=[-12, None])\n return input_\n # to do: replace batchMetricSplitPreprocess by iteration of all 12 labels,\n # including padded first input sequences.\n def valLabelSelect(self, data, label_id = -1):\n return data\n\n\nclass MIMVarSeqLabel(MIMVariable):\n def __init__(self):\n pass\n def batchTrainPreprocess(self, batch, data, label_date_id, batch_seq_len, t_len):\n\n # self.t_len is 20 as an example \n ##label_date_id = np.random.randint(-data.labeled_dates,0) # labels can be from -1 to -12\n # example: last t_step can use entire sequence: 20 + (-1+1) = 20\n # example: first t_step can use sequence: 20 + (-12+1) = 9\n # to do: add sep17 image \n max_seq_len = t_len + (label_date_id+1) # from 9 to 20\n \n if min_seq_len == max_seq_len:\n batch_seq_len = min_seq_len\n else:\n batch_seq_len = np.random.randint(min_seq_len,max_seq_len+1) # from 9 to 20 in the largest case\n\n # example: -1-20+1:-1 = -20:-1\n # example: -12-9+1:-12 = -20:-12\n # example: -3-11+1:-3 = -13:-3 \n # example: -1-18+1:-1+1 = -18:0\n ##print(\"Batch 
slice\",label_date_id-batch_seq_len+1,label_date_id+1)\n ##deb.prints(label_date_id+1!=0)\n ##deb.prints(label_date_id)\n if label_date_id+1!=0:\n batch['train']['in'] = batch['train']['in'][:, label_date_id-batch_seq_len+1:label_date_id+1]\n else:\n batch['train']['in'] = batch['train']['in'][:, label_date_id-batch_seq_len+1:]\n\n #deb.prints(batch['train']['in'].shape[1])\n #deb.prints(batch['train']['in'].shape[1] == batch_seq_len)\n #deb.prints(batch_seq_len)\n #deb.prints(label_date_id)\n assert batch['train']['in'].shape[1] == batch_seq_len\n\n input_ = np.zeros(batch['train']['shape']).astype(np.float16)\n input_[:, -batch_seq_len:] = batch['train']['in']\n input_ = data.addDoty(input_)\n return input_\n" ]
[ [ "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Cospel/addons
[ "af6866a2e6d9ddbc79d612d7cb04a8a5befe4a47" ]
[ "tensorflow_addons/optimizers/tests/yogi_test.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Yogi optimizer.\"\"\"\n\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\n\nfrom tensorflow_addons.optimizers import yogi\nfrom tensorflow_addons.utils import test_utils\n\n\ndef yogi_update_numpy(\n param,\n g_t,\n t,\n m,\n v,\n alpha=0.01,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-3,\n l1reg=0.0,\n l2reg=0.0,\n):\n \"\"\"Performs Yogi parameter update using numpy.\n\n Args:\n param: An numpy ndarray of the current parameter.\n g_t: An numpy ndarray of the current gradients.\n t: An numpy ndarray of the current time step.\n m: An numpy ndarray of the 1st moment estimates.\n v: An numpy ndarray of the 2nd moment estimates.\n alpha: A float value of the learning rate.\n beta1: A float value of the exponential decay rate for the 1st moment\n estimates.\n beta2: A float value of the exponential decay rate for the 2nd moment\n estimates.\n epsilon: A float of a small constant for numerical stability.\n l1reg: A float value of L1 regularization\n l2reg: A float value of L2 regularization\n Returns:\n A tuple of numpy ndarrays (param_t, m_t, v_t) representing the\n updated parameters for `param`, `m`, and `v` respectively.\n \"\"\"\n beta1 = np.array(beta1, dtype=param.dtype)\n beta2 = np.array(beta2, dtype=param.dtype)\n\n alpha_t = alpha * 
np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)\n\n m_t = beta1 * m + (1 - beta1) * g_t\n g2_t = g_t * g_t\n v_t = v - (1 - beta2) * np.sign(v - g2_t) * g2_t\n\n per_coord_lr = alpha_t / (np.sqrt(v_t) + epsilon)\n param_t = param - per_coord_lr * m_t\n\n if l1reg > 0:\n param_t = (param_t - l1reg * per_coord_lr * np.sign(param_t)) / (\n 1 + l2reg * per_coord_lr\n )\n print(param_t.dtype)\n param_t[np.abs(param_t) < l1reg * per_coord_lr] = 0.0\n elif l2reg > 0:\n param_t = param_t / (1 + l2reg * per_coord_lr)\n return param_t, m_t, v_t\n\n\ndef get_beta_accumulators(opt, dtype):\n local_step = tf.cast(opt.iterations + 1, dtype)\n beta_1_t = tf.cast(opt._get_hyper(\"beta_1\"), dtype)\n beta_1_power = tf.math.pow(beta_1_t, local_step)\n beta_2_t = tf.cast(opt._get_hyper(\"beta_2\"), dtype)\n beta_2_power = tf.math.pow(beta_2_t, local_step)\n return (beta_1_power, beta_2_power)\n\n\ndef _dtypes_to_test(use_gpu):\n if use_gpu:\n return [tf.dtypes.float32, tf.dtypes.float64]\n else:\n return [tf.dtypes.half, tf.dtypes.float32, tf.dtypes.float64]\n\n\ndef do_test_sparse(beta1=0.0, l1reg=0.0, l2reg=0.0):\n for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):\n # Initialize variables for numpy implementation.\n m0, v0, m1, v1 = 0.0, 1.0, 0.0, 1.0\n var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)\n var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)\n\n var0 = tf.Variable(var0_np)\n var1 = tf.Variable(var1_np)\n grads0_np_indices = np.array([0, 1], dtype=np.int32)\n grads0 = tf.IndexedSlices(\n tf.constant(grads0_np), tf.constant(grads0_np_indices), tf.constant([2])\n )\n grads1_np_indices = np.array([0, 1], dtype=np.int32)\n grads1 = tf.IndexedSlices(\n tf.constant(grads1_np), tf.constant(grads1_np_indices), tf.constant([2])\n )\n opt = yogi.Yogi(\n beta1=beta1,\n l1_regularization_strength=l1reg,\n 
l2_regularization_strength=l2reg,\n initial_accumulator_value=1.0,\n )\n\n # Fetch params to validate initial values.\n np.testing.assert_allclose(np.asanyarray([1.0, 2.0]), var0.numpy())\n np.testing.assert_allclose(np.asanyarray([3.0, 4.0]), var1.numpy())\n\n # Run 3 steps of Yogi.\n for t in range(1, 4):\n beta1_power, beta2_power = get_beta_accumulators(opt, dtype)\n test_utils.assert_allclose_according_to_type(beta1 ** t, beta1_power)\n test_utils.assert_allclose_according_to_type(0.999 ** t, beta2_power)\n opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n\n var0_np, m0, v0 = yogi_update_numpy(\n var0_np, grads0_np, t, m0, v0, beta1=beta1, l1reg=l1reg, l2reg=l2reg\n )\n var1_np, m1, v1 = yogi_update_numpy(\n var1_np, grads1_np, t, m1, v1, beta1=beta1, l1reg=l1reg, l2reg=l2reg\n )\n\n # Validate updated params.\n test_utils.assert_allclose_according_to_type(\n var0_np, var0.numpy(),\n )\n test_utils.assert_allclose_according_to_type(\n var1_np, var1.numpy(),\n )\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_sparse():\n do_test_sparse()\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_sparse_regularization():\n do_test_sparse(l1reg=0.1, l2reg=0.2)\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_sparse_momentum():\n do_test_sparse(beta1=0.9)\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_sparse_momentum_regularization():\n do_test_sparse(beta1=0.9, l1reg=0.1, l2reg=0.2)\n\n\ndef test_sparse_repeated_indices():\n for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):\n repeated_index_update_var = tf.Variable([[1.0], [2.0]], dtype=dtype)\n aggregated_update_var = tf.Variable([[1.0], [2.0]], dtype=dtype)\n grad_repeated_index = tf.IndexedSlices(\n tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),\n tf.constant([1, 1]),\n tf.constant([2, 1]),\n )\n grad_aggregated = tf.IndexedSlices(\n tf.constant([0.2], shape=[1, 1], dtype=dtype),\n tf.constant([1]),\n tf.constant([2, 1]),\n 
)\n opt1 = yogi.Yogi()\n opt2 = yogi.Yogi()\n\n np.testing.assert_allclose(\n aggregated_update_var.numpy(), repeated_index_update_var.numpy(),\n )\n\n for _ in range(3):\n opt1.apply_gradients([(grad_repeated_index, repeated_index_update_var)])\n opt2.apply_gradients([(grad_aggregated, aggregated_update_var)])\n\n np.testing.assert_allclose(\n aggregated_update_var.numpy(), repeated_index_update_var.numpy(),\n )\n\n\ndef do_test_basic(beta1=0.0, l1reg=0.0, l2reg=0.0):\n for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):\n # Initialize variables for numpy implementation.\n m0, v0, m1, v1 = 0.0, 1.0, 0.0, 1.0\n var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)\n var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)\n\n var0 = tf.Variable(var0_np)\n var1 = tf.Variable(var1_np)\n grads0 = tf.constant(grads0_np)\n grads1 = tf.constant(grads1_np)\n\n opt = yogi.Yogi(\n beta1=beta1,\n l1_regularization_strength=l1reg,\n l2_regularization_strength=l2reg,\n initial_accumulator_value=1.0,\n )\n\n # Fetch params to validate initial values.\n np.testing.assert_allclose(np.asanyarray([1.0, 2.0]), var0.numpy())\n np.testing.assert_allclose(np.asanyarray([3.0, 4.0]), var1.numpy())\n\n # Run 3 steps of Yogi.\n for t in range(1, 4):\n beta1_power, beta2_power = get_beta_accumulators(opt, dtype)\n test_utils.assert_allclose_according_to_type(beta1 ** t, beta1_power)\n test_utils.assert_allclose_according_to_type(0.999 ** t, beta2_power)\n\n opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n\n var0_np, m0, v0 = yogi_update_numpy(\n var0_np, grads0_np, t, m0, v0, beta1=beta1, l1reg=l1reg, l2reg=l2reg\n )\n var1_np, m1, v1 = yogi_update_numpy(\n var1_np, grads1_np, t, m1, v1, beta1=beta1, l1reg=l1reg, l2reg=l2reg\n )\n\n # Validate updated params.\n test_utils.assert_allclose_according_to_type(var0_np, var0.numpy())\n 
test_utils.assert_allclose_according_to_type(var1_np, var1.numpy())\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_basic():\n do_test_basic()\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_basic_regularization():\n do_test_basic(l1reg=0.1, l2reg=0.2)\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_basic_momentum():\n do_test_basic(beta1=0.9)\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_basic_momentum_regularization():\n do_test_basic(beta1=0.9, l1reg=0.1, l2reg=0.2)\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_tensor_learning_rate():\n for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):\n # Initialize variables for numpy implementation.\n m0, v0, m1, v1 = 0.0, 1.0, 0.0, 1.0\n var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)\n var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)\n\n var0 = tf.Variable(var0_np)\n var1 = tf.Variable(var1_np)\n grads0 = tf.constant(grads0_np)\n grads1 = tf.constant(grads1_np)\n opt = yogi.Yogi(tf.constant(0.01), initial_accumulator_value=1.0)\n\n # Fetch params to validate initial values.\n np.testing.assert_allclose(np.asanyarray([1.0, 2.0]), var0.numpy())\n np.testing.assert_allclose(np.asanyarray([3.0, 4.0]), var1.numpy())\n\n # Run 3 steps of Yogi.\n for t in range(1, 4):\n beta1_power, beta2_power = get_beta_accumulators(opt, dtype)\n test_utils.assert_allclose_according_to_type(0.9 ** t, beta1_power)\n test_utils.assert_allclose_according_to_type(0.999 ** t, beta2_power)\n\n opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n\n var0_np, m0, v0 = yogi_update_numpy(var0_np, grads0_np, t, m0, v0)\n var1_np, m1, v1 = yogi_update_numpy(var1_np, grads1_np, t, m1, v1)\n\n # Validate updated params.\n test_utils.assert_allclose_according_to_type(var0_np, var0.numpy())\n 
test_utils.assert_allclose_according_to_type(var1_np, var1.numpy())\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_sharing():\n for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):\n # Initialize variables for numpy implementation.\n m0, v0, m1, v1 = 0.0, 1.0, 0.0, 1.0\n var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)\n var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)\n\n var0 = tf.Variable(var0_np)\n var1 = tf.Variable(var1_np)\n grads0 = tf.constant(grads0_np)\n grads1 = tf.constant(grads1_np)\n opt = yogi.Yogi(initial_accumulator_value=1.0)\n\n # Fetch params to validate initial values.\n np.testing.assert_allclose(np.asanyarray([1.0, 2.0]), var0.numpy())\n np.testing.assert_allclose(np.asanyarray([3.0, 4.0]), var1.numpy())\n\n # Run 3 steps of intertwined Yogi1 and Yogi2.\n for t in range(1, 4):\n beta1_power, beta2_power = get_beta_accumulators(opt, dtype)\n test_utils.assert_allclose_according_to_type(0.9 ** t, beta1_power)\n test_utils.assert_allclose_according_to_type(0.999 ** t, beta2_power)\n opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n var0_np, m0, v0 = yogi_update_numpy(var0_np, grads0_np, t, m0, v0)\n var1_np, m1, v1 = yogi_update_numpy(var1_np, grads1_np, t, m1, v1)\n\n # Validate updated params.\n test_utils.assert_allclose_according_to_type(var0_np, var0.numpy())\n test_utils.assert_allclose_according_to_type(var1_np, var1.numpy())\n\n\ndef test_get_config():\n opt = yogi.Yogi(1e-4)\n config = opt.get_config()\n assert config[\"learning_rate\"] == 1e-4\n\n\ndef test_serialization():\n optimizer = yogi.Yogi(1e-4)\n config = tf.keras.optimizers.serialize(optimizer)\n new_optimizer = tf.keras.optimizers.deserialize(config)\n assert new_optimizer.get_config() == optimizer.get_config()\n" ]
[ [ "tensorflow.keras.optimizers.deserialize", "tensorflow.constant", "tensorflow.Variable", "numpy.sqrt", "numpy.abs", "tensorflow.cast", "numpy.sign", "numpy.asanyarray", "tensorflow.test.is_gpu_available", "tensorflow.math.pow", "numpy.array", "tensorflow.keras.optimizers.serialize" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
joamatab/photonic-coupling-drivers
[ "c12581d8e2158a292e1c585e45c0207c8129c0f1" ]
[ "plab/smu/sweep_current.py" ]
[ "from typing import Iterable, Union, Optional\nfrom time import strftime, localtime\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nimport qontrol\nfrom plab.config import logger, CONFIG\nfrom plab.measurement import measurement, Measurement\nfrom plab.smu.smu_control import smu_control\n\n\n@measurement\ndef sweep_current(\n imin: float = 0, imax: float = 50e-3, steps: int = 20, n: int = 1\n) -> pd.DataFrame:\n \"\"\"Sweep current and measure voltage. works only for q8iv\n\n Args:\n imin: min current\n imax: max current\n steps: number of steps\n n: number of channels to sweep\n \"\"\"\n currents = np.linspace(imin, imax, steps)\n df = pd.DataFrame(dict(i=currents))\n\n if isinstance(n, int):\n channels = range(n)\n else:\n channels = n\n for channel in channels:\n currents = np.zeros_like(currents)\n # set all channels to zero\n q.v[:] = 0\n for j, voltage in enumerate(currents):\n q.i[channel] = float(voltage)\n measured_voltage = q.v[channel]\n measured_current = q.i[channel]\n currents[j] = measured_current\n\n df[f\"i_{channel}\"] = currents\n return df\n\n\ndef get_current(channel: int, voltage: float) -> float:\n \"\"\"Sets voltage for a channel and returns measured current.\n\n Args:\n channel:\n voltage:\n\n \"\"\"\n q = smu_qontrol()\n q.v[channel] = float(voltage)\n return q.i[channel]\n\n\ndef zero_voltage() -> None:\n \"\"\"Sets all voltage channels to zero.\"\"\"\n q = smu_qontrol()\n q.v[:] = 0\n return\n\n\nif __name__ == \"__main__\":\n zero_voltage()\n # print(get_current(62, 0.1))\n # m = sweep_voltage(vmax=3, channels=(1,))\n # m.write()\n" ]
[ [ "numpy.zeros_like", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
watsonjj/oxpytools
[ "99f01d0a979d4592d5295b431f32676ba452d39a" ]
[ "jason_testbench/chec_time_pipeline.py" ]
[ "import argparse\nimport numpy as np\nimport target_calib\nfrom astropy import log\nfrom ctapipe.calib.camera.integrators import local_peak_integration\nfrom targetpipe.io.files import CHECInputFile as InputFile\nfrom targetpipe.io.pixels import Pixels\nfrom targetpipe.utils.plotting import intensity_to_hex\n\nfrom IPython import embed\nfrom tqdm import tqdm\n\n\nparams = dict(integration_window=[7, 3])\n\ninput_path = '/Users/Jason/Software/outputs/lab/ped_wfs.fits'\ninput_file = InputFile(input_path, max_events=1000)\nn_events = input_file.num_events\ntelid = 0\n\nfirst_event = input_file.get_event(0)\nn_pix = first_event.dl0.tel[0].adc_samples[telid].shape[0]\nn_samples = first_event.dl0.tel[0].adc_samples[telid].shape[1]\n\nped_obj = target_calib.Pedestal(32)\npixels = Pixels()\npix2tmtmpix = pixels.pix_to_tm_tmpix.astype(np.uint16)\n\ndef generate_pedestal():\n source = input_file.read()\n desc = \"Filling pedestal\"\n with tqdm(total=n_events, desc=desc) as pbar:\n for event in source:\n pbar.update(1)\n first_cell_ids = event.dl0.tel[telid].first_cell_ids\n waveforms = event.dl0.tel[telid].adc_samples[0]\n ped_obj.AddEvent(pix2tmtmpix, waveforms, first_cell_ids)\n return ped_obj\n#Initial: 30Hz\n#Update to new hessio layout: 35Hz\n#Reduced to 96 samples: 36Hz\n#(IO)Loop pixels in c++: 73Hz\n#(Add Ped)Loop pixels in c++: 280Hz\n\ndef calibrate(ped_obj):\n source = input_file.read()\n\n pedsub = np.empty((n_pix, n_samples), dtype=np.float32)\n\n with tqdm(total=n_events, desc='Calibrating') as pbar:\n for event in source:\n pbar.update(1)\n waveforms = event.dl0.tel[telid].adc_samples[0]\n fci = event.dl0.tel[telid].first_cell_ids\n\n pedsub[:] = waveforms[:]\n if ped_obj:\n ped_obj.ApplyEvent(pix2tmtmpix, waveforms, fci, pedsub)\n\n # TODO: Transfer function\n\n # TODO: Proper extraction (using components)\n charge = local_peak_integration(pedsub[None, ...], params)\n\n # TODO: Convert to pe?\n#Initial: 28Hz\n#Update to new hessio layout: 32Hz\n#Reduced to 
96 samples: 32Hz\n#(IO)Loop pixels in c++: 60Hz\n#(Sub Ped)Loop pixels in c++: 170Hz\n\ndef colours():\n source = input_file.read()\n\n pedsub = np.empty((n_pix, n_samples), dtype=np.float32)\n\n with tqdm(total=n_events, desc='Calibrating') as pbar:\n for event in source:\n pbar.update(1)\n waveforms = event.dl0.tel[telid].adc_samples[0]\n # h = intensity_to_hex(waveforms[:, 0])\n\n\nembed()\n" ]
[ [ "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
trsavi/Machine-Learning-Web-App
[ "fd7567bfa588c06e0d564ef1dd72e8466003e33c" ]
[ "carClass.py" ]
[ "\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 17 19:42:00 2021\r\n\r\n@author: Vukasin\r\n\"\"\"\r\n\r\n\r\nimport pandas as pd\r\n\r\ndata = pd.read_csv('./Data/usedCleanedPre.csv')\r\n\r\n\r\n# Get bradns\r\ndef get_brands():\r\n return sorted(list(data['Marka'].unique()))\r\n\r\n# Get models\r\ndef get_models(brend):\r\n return sorted(list(data[data['Marka']==brend]['Model'].unique()))\r\n\r\n# Get car types\r\ndef get_car_types(brend, model):\r\n return list(data[(data['Model']==model) & (data['Marka']==brend)]['Karoserija'].unique())\r\n \r\n# Get car year\r\ndef get_car_year(model):\r\n return int(data[data['Model']==model]['Godiste'].min()), int(data[data['Model']==model]['Godiste'].max())\r\n\r\ndef get_car_mileage(model, year):\r\n return int(data[(data['Model']==model) & (data['Godiste']==year)]['Kilometraza'].min()), int(data[(data['Model']==model) & (data['Godiste']==year)]['Kilometraza'].max())\r\n\r\n# Get car volume\r\ndef get_car_volume(model, karoserija):\r\n return sorted(list(data[(data['Model']==model) & (data['Karoserija']==karoserija)]['Kubikaza'].unique()))\r\n\r\n# Get fuel type\r\ndef get_fuel_type(model, karoserija):\r\n return list(data[(data['Model']==model) & (data['Karoserija']==karoserija)]['Gorivo'].unique())\r\n\r\n# Get engine power\r\ndef get_engine_power(model, kubikaza):\r\n return sorted(list(data[(data['Model']==model) & (data['Kubikaza']==kubikaza)]['Snaga motora'].unique()))\r\n\r\n\r\n# Get car drive system\r\ndef get_drive(model, car_type):\r\n return sorted(list(data[(data['Model']==model) & (data['Karoserija']==car_type)]['Pogon'].unique()))\r\n\r\n# Get type of shift\r\ndef get_shift(model, car_type):\r\n return sorted(list(data[(data['Model']==model) & (data['Karoserija']==car_type)]['Menjac'].unique()))\r\n\r\ndef get_emmision_class(model, year):\r\n return sorted(list(data[(data['Model']==model) & (data['Godiste']==year)]['EKM'].unique()))\r\n\r\n# Get car colors\r\ndef get_material():\r\n return 
sorted(list(data['Materijal enterijera'].unique()))\r\n\r\n# Get car colors\r\ndef get_colors():\r\n return sorted(list(data['Boja'].unique()))\r\n\r\n# Get car ac\r\ndef get_ac(model):\r\n return sorted(list(data[(data['Model']==model)]['Klima'].unique()))\r\n\r\n\r\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
TheGreymanShow/House.Viz
[ "86773ebd8ed1802c7f38ace647ca28679b00f8fb" ]
[ "Scripts/Normalize_data.py" ]
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport random\n\ndataset = pd.read_csv(\"C:/Users/admin/Desktop/Stevens Internship 2017/Datasets/Final/final_plotset500.csv\")\ndataset2 = pd.read_csv(\"C:/Users/admin/Desktop/Stevens Internship 2017/Datasets/Final/final_plotset500.csv\")\n\n#------------------------------YEAR BUILT--------------------------------------\nmin = 1700 \nmax = 2016\na = 0\nb = 10\narray = []\norig_array = []\ndf = pd.DataFrame()\ndf[\"YEAR.BUILT\"] = 0\ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset.iterrows():\n value = float(row[\"YEAR.BUILT\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"YEAR.BUILT\"] = array \ndf[\"YEAR.BUILT\"] = orig_array \n \n#------------------------------LAND SQUARE FEET--------------------------------------\nmin = 300 \nmax = 10300\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"LAND.SQUARE.FEET\"]=0\ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset.iterrows():\n value = float(row[\"LAND.SQUARE.FEET\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"LAND.SQUARE.FEET\"] = array \ndf[\"LAND.SQUARE.FEET\"] = orig_array \n\n#------------------------------SALE PRICE--------------------------------------\nmin = 75000\nmax = 7750000\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"SALE.PRICE\"] = 0\ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset.iterrows():\n value = float(row[\"SALE.PRICE\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"SALE.PRICE\"] = array \ndf[\"SALE.PRICE\"] = orig_array \n\n#------------------------------SCHOOLS--------------------------------------\nmin = 0 \nmax = 20\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"Schools\"]=0\ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in 
dataset.iterrows():\n value = float(row[\"Schools\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"Schools\"] = array \ndf[\"Schools\"] = orig_array \n\ndataset.to_csv(\"C:/Users/admin/Desktop/Stevens Internship 2017/Datasets/Final/dataset500.csv\")\ndf.to_csv(\"C:/Users/admin/Desktop/Stevens Internship 2017/Datasets/Final/orig_dataset500.csv\") \n \n \n#------------------------------BUS STATION--------------------------------------\nmin = 1 \nmax = 10\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"Bus_station\"]=0\n\ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset.iterrows():\n value = float(row[\"Bus_station\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"Bus_station\"] = array \ndf[\"Bus_station\"] = orig_array \n\n#------------------------------SUBWAY STATION--------------------------------------\nmin = 0 \nmax = 3\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"Subway_station\"] = 0\n\ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset.iterrows():\n value = float(row[\"Subway_station\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"Subway_station\"] = array \ndf[\"Subway_station\"] = orig_array \n\n#------------------------------TRAIN STATION--------------------------------------\nmin = 0 \nmax = 2\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"Train_station\"]=0\n\ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset.iterrows():\n value = float(row[\"Train_station\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"Train_station\"] = array \ndf[\"Train_station\"] = orig_array \n\n#------------------------------HOSPITAL--------------------------------------\nmin = 5 \nmax = 25\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"Hospitals\"]=0\n\ndataset.describe()\n\ndef 
scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset2.iterrows():\n value = float(row[\"Hospitals\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"Hospitals\"] = array \ndf[\"Hospitals\"] = orig_array \n\n#------------------------------POLICE STATION--------------------------------------\nmin = 0\nmax = 2\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"Police_station\"]=0\n\ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset.iterrows():\n value = float(row[\"Police_station\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"Police_station\"] = array \ndf[\"Police_station\"] = orig_array \n\n#------------------------------FIRE STATION--------------------------------------\nmin = 0 \nmax = 3\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"Fire_station\"]=0\n\ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset.iterrows():\n value = float(row[\"Fire_station\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"Fire_station\"] = array \ndf[\"Fire_station\"] = orig_array \n\n#------------------------------PARKS--------------------------------------\nmin = 3 \nmax = 20\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"Parks\"]=0\n \ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset.iterrows():\n value = float(row[\"Parks\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"Parks\"] = array \ndf[\"Parks\"] = orig_array \n \n#------------------------------RESTAURANT--------------------------------------\nmin = 3\nmax = 56\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"Restaurants\"]=0\n\ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset.iterrows():\n value = float(row[\"Restaurants\"])\n temp = scale(value)\n 
array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"Restaurants\"] = array \ndf[\"Restaurants\"] = orig_array \n \n#------------------------------GROCERIES--------------------------------------\nmin = 6 \nmax = 58\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"Groceries\"]=0\n\ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset.iterrows():\n value = float(row[\"Groceries\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"Groceries\"] = array \ndf[\"Groceries\"] = orig_array \n \n#-----------------------------MOVIE THEATRE---------------------------------------\nmin = 0 \nmax = 4\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"Movie_Theater\"]=0\n\ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset.iterrows():\n value = float(row[\"Movie_Theater\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"Movie_Theater\"] = array \ndf[\"Movie_Theater\"] = orig_array \n \n#-----------------------------------BANKS--------------------------------------\nmin = 0\nmax = 6\na = 0\nb = 10\narray = []\norig_array = []\ndf[\"Bank\"]=0\n\ndef scale(x):\n return (((b-a)*(x-min))/(max-min)) + a\n\nfor index,row in dataset.iterrows():\n value = float(row[\"Bank\"])\n temp = scale(value)\n array.append(round(temp,2))\n orig_array.append(temp) \n \ndataset[\"Bank\"] = array \ndf[\"Bank\"] = orig_array \n\n#--------------------------------Save------------------------------------------\n\ndataset.to_csv(\"C:/Users/admin/Desktop/Stevens Internship 2017/Datasets/Final/dataset500.csv\")\ndf.to_csv(\"C:/Users/admin/Desktop/Stevens Internship 2017/Datasets/Final/orig_dataset500.csv\") \n\n \n#---------------------------HISTOGRAM------------------------------------------\n#plt.hist(arr[:100])\n#plt.xlabel('house')\n#plt.ylabel('score')\n#plt.title('House vs Score')\n#plt.axis([0,10,0,10])\n#plt.grid(True)\n#plt.show()\n\n 
\n#----------------------------------BAR PLOT------------------------------------\n#y_pos = np.arange(1000)\n#performance = arr[:1000] \n#plt.bar(y_pos, performance, align='center', alpha=0.5)\n#plt.xticks(y_pos)\n#plt.houses('Houses')\n#plt.ylabel('Year Built')\n#plt.title('House vs Year Built Rating') \n#plt.show()\n\n \n#-------------------------------SCATTER PLOT----------------------------------- \n#area = np.array(dataset[\"LAND.SQUARE.FEET\"])\n#price = np.array(dataset[\"SALE.PRICE\"])\n\n#year = np.array(dataset[\"YEAR.BUILT\"])\n#plt.scatter(arr,year)\n#plt.xlabel(\"Area of houses(Sq.Ft)\")\n#plt.ylabel(\"Price of house($USD)\")\n#plt.show()\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
ranocha/riemann
[ "d7b4d72d1147eee12e60a64f46a7e076b28a212d" ]
[ "riemann/shallow_1D_py.py" ]
[ "#!/usr/bin/env python\n# encoding: utf-8\nr\"\"\"\nRiemann solvers for the shallow water equations.\n\nThe available solvers are:\n * Roe - Use Roe averages to caluclate the solution to the Riemann problem\n * HLL - Use a HLL solver\n * Exact - Use a newton iteration to calculate the exact solution to the\n Riemann problem\n\n.. math::\n q_t + f(q)_x = 0\n\nwhere\n\n.. math::\n q(x,t) = \\left [ \\begin{array}{c} h \\\\ h u \\end{array} \\right ],\n\nthe flux function is\n\n.. math::\n f(q) = \\left [ \\begin{array}{c} h u \\\\ hu^2 + 1/2 g h^2 \\end{array}\\right ].\n\nand :math:`h` is the water column height, :math:`u` the velocity and :math:`g`\nis the gravitational acceleration.\n\"\"\"\n\nfrom __future__ import absolute_import\nimport numpy as np\nfrom six.moves import range\n\nnum_eqn = 2\nnum_waves = 2\n\n\ndef shallow_roe_1D(q_l, q_r, aux_l, aux_r, problem_data):\n r\"\"\"\n Roe shallow water solver in 1d::\n\n ubar = (sqrt(u_l) + sqrt(u_r)) / (sqrt(h_l) + sqrt(h_r))\n cbar = sqrt( 0.5 * g * (h_l + h_r))\n\n W_1 = | 1 | s_1 = ubar - cbar\n | ubar - cbar |\n\n W_2 = | 1 | s_1 = ubar + cbar\n | ubar + cbar |\n\n a1 = 0.5 * ( - delta_hu + (ubar + cbar) * delta_h ) / cbar\n a2 = 0.5 * ( delta_hu - (ubar - cbar) * delta_h ) / cbar\n\n *problem_data* should contain:\n - *g* - (float) Gravitational constant\n - *efix* - (bool) Boolean as to whether a entropy fix should be used, if\n not present, false is assumed\n\n :Version: 1.0 (2009-02-05)\n \"\"\"\n\n # Array shapes\n num_rp = q_l.shape[1]\n\n # Output arrays\n wave = np.empty( (num_eqn, num_waves, num_rp) )\n s = np.zeros( (num_waves, num_rp) )\n amdq = np.zeros( (num_eqn, num_rp) )\n apdq = np.zeros( (num_eqn, num_rp) )\n\n # Compute roe-averaged quantities\n ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) /\n (np.sqrt(q_l[0,:]) + np.sqrt(q_r[0,:])) )\n cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:]))\n\n # Compute Flux structure\n delta = q_r - q_l\n a1 = 0.5 * 
(-delta[1,:] + (ubar + cbar) * delta[0,:]) / cbar\n a2 = 0.5 * ( delta[1,:] - (ubar - cbar) * delta[0,:]) / cbar\n\n # Compute each family of waves\n wave[0,0,:] = a1\n wave[1,0,:] = a1 * (ubar - cbar)\n s[0,:] = ubar - cbar\n\n wave[0,1,:] = a2\n wave[1,1,:] = a2 * (ubar + cbar)\n s[1,:] = ubar + cbar\n\n if problem_data['efix']:\n raise NotImplementedError(\"Entropy fix has not been implemented.\")\n else:\n s_index = np.zeros((2,num_rp))\n for m in range(num_eqn):\n for mw in range(num_waves):\n s_index[0,:] = s[mw,:]\n amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:]\n apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:]\n\n return wave, s, amdq, apdq\n\ndef shallow_hll_1D(q_l,q_r,aux_l,aux_r,problem_data):\n r\"\"\"\n HLL shallow water solver ::\n\n\n W_1 = Q_hat - Q_l s_1 = min(u_l-c_l,u_l+c_l,lambda_roe_1,lambda_roe_2)\n W_2 = Q_r - Q_hat s_2 = max(u_r-c_r,u_r+c_r,lambda_roe_1,lambda_roe_2)\n\n Q_hat = ( f(q_r) - f(q_l) - s_2 * q_r + s_1 * q_l ) / (s_1 - s_2)\n\n *problem_data* should contain:\n - *g* - (float) Gravitational constant\n\n :Version: 1.0 (2009-02-05)\n \"\"\"\n # Array shapes\n num_rp = q_l.shape[1]\n num_eqn = 2\n num_waves = 2\n\n # Output arrays\n wave = np.empty( (num_eqn, num_waves, num_rp) )\n s = np.empty( (num_waves, num_rp) )\n amdq = np.zeros( (num_eqn, num_rp) )\n apdq = np.zeros( (num_eqn, num_rp) )\n\n # Compute Roe and right and left speeds\n ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) /\n (np.sqrt(q_l[0,:]) + np.sqrt(q_r[0,:])) )\n cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:]))\n u_r = q_r[1,:] / q_r[0,:]\n c_r = np.sqrt(problem_data['grav'] * q_r[0,:])\n u_l = q_l[1,:] / q_l[0,:]\n c_l = np.sqrt(problem_data['grav'] * q_l[0,:])\n\n # Compute Einfeldt speeds\n s_index = np.empty((4,num_rp))\n s_index[0,:] = ubar+cbar\n s_index[1,:] = ubar-cbar\n s_index[2,:] = u_l + c_l\n s_index[3,:] = u_l - c_l\n s[0,:] = np.min(s_index,axis=0)\n s_index[2,:] = u_r + c_r\n s_index[3,:] = u_r - c_r\n 
s[1,:] = np.max(s_index,axis=0)\n\n # Compute middle state\n q_hat = np.empty((2,num_rp))\n q_hat[0,:] = ((q_r[1,:] - q_l[1,:] - s[1,:] * q_r[0,:]\n + s[0,:] * q_l[0,:]) / (s[0,:] - s[1,:]))\n q_hat[1,:] = ((q_r[1,:]**2/q_r[0,:] + 0.5 * problem_data['grav'] * q_r[0,:]**2\n - (q_l[1,:]**2/q_l[0,:] + 0.5 * problem_data['grav'] * q_l[0,:]**2)\n - s[1,:] * q_r[1,:] + s[0,:] * q_l[1,:]) / (s[0,:] - s[1,:]))\n\n # Compute each family of waves\n wave[:,0,:] = q_hat - q_l\n wave[:,1,:] = q_r - q_hat\n\n # Compute variations\n s_index = np.zeros((2,num_rp))\n for m in range(num_eqn):\n for mw in range(num_waves):\n s_index[0,:] = s[mw,:]\n amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:]\n apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:]\n\n return wave, s, amdq, apdq\n\n\ndef shallow_fwave_1d(q_l, q_r, aux_l, aux_r, problem_data):\n r\"\"\"Shallow water Riemann solver using fwaves\n\n Also includes support for bathymetry but be wary if you think you might have\n dry states as this has not been tested.\n\n *problem_data* should contain:\n - *grav* - (float) Gravitational constant\n - *dry_tolerance* - (float) Set velocities to zero if h is below this\n tolerance.\n - *sea_level* - (float) Datum from which the dry-state is calculated.\n\n :Version: 1.0 (2014-09-05)\n :Version: 2.0 (2017-03-07)\n \"\"\"\n\n g = problem_data['grav']\n dry_tolerance = problem_data['dry_tolerance']\n sea_level = problem_data['sea_level']\n\n num_rp = q_l.shape[1]\n num_eqn = 2\n num_waves = 2\n\n # Output arrays\n fwave = np.empty( (num_eqn, num_waves, num_rp) )\n s = np.empty( (num_waves, num_rp) )\n amdq = np.zeros( (num_eqn, num_rp) )\n apdq = np.zeros( (num_eqn, num_rp) )\n\n # Extract state\n u_l = np.where(q_l[0, :] > dry_tolerance,\n q_l[1, :] / q_l[0, :], 0.0)\n u_r = np.where(q_r[0, :] > dry_tolerance,\n q_r[1, :] / q_r[0, :], 0.0)\n phi_l = q_l[0, :] * u_l**2 + 0.5 * g * q_l[0, :]**2\n phi_r = q_r[0, :] * u_r**2 + 0.5 * g * q_r[0, :]**2\n h_bar = 0.5 * (q_l[0, :] + q_r[0, :])\n\n # 
Speeds\n u_hat = (np.sqrt(g * q_l[0, :]) * u_l + np.sqrt(g * q_r[0, :]) * u_r) \\\n / (np.sqrt(g * q_l[0, :]) + np.sqrt(g * q_r[0, :]))\n c_hat = np.sqrt(g * h_bar)\n s[0, :] = np.amin(np.vstack((u_l - np.sqrt(g * q_l[0, :]),\n u_hat - c_hat)), axis=0)\n s[1, :] = np.amax(np.vstack((u_r + np.sqrt(g * q_r[0, :]),\n u_hat + c_hat)), axis=0)\n\n delta1 = q_r[1, :] - q_l[1, :]\n delta2 = phi_r - phi_l + g * h_bar * (aux_r[0, :] - aux_l[0, :])\n\n beta1 = (s[1, :] * delta1 - delta2) / (s[1, :] - s[0, :])\n beta2 = (delta2 - s[0, :] * delta1) / (s[1, :] - s[0, :])\n\n fwave[0, 0, :] = beta1\n fwave[1, 0, :] = beta1 * s[0, :]\n fwave[0, 1, :] = beta2\n fwave[1, 1, :] = beta2 * s[1, :]\n\n for m in range(num_eqn):\n for mw in range(num_waves):\n amdq[m, :] += (s[mw, :] < 0.0) * fwave[m, mw, :]\n apdq[m, :] += (s[mw, :] > 0.0) * fwave[m, mw, :]\n\n amdq[m, :] += (s[mw, :] == 0.0) * fwave[m, mw, :] * 0.5\n apdq[m, :] += (s[mw, :] == 0.0) * fwave[m, mw, :] * 0.5\n\n return fwave, s, amdq, apdq\n\n\ndef shallow_exact_1D(q_l, q_r, aux_l, aux_r, problem_data):\n r\"\"\"\n Exact shallow water Riemann solver\n\n .. warning::\n This solver has not been implemented.\n\n \"\"\"\n raise NotImplementedError(\"The exact swe solver has not been implemented.\")\n" ]
[ [ "numpy.sqrt", "numpy.min", "numpy.max", "numpy.zeros", "numpy.where", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jajokine/Neural-Networks-Image-Recognition
[ "e00c176d40a577fd799827fd9a2255e546119340" ]
[ "mlp.py" ]
[ "# ---------------------------------------------------------------------------------------------#\n# #\n# Neural Network - PyTorch - Double Digit #\n# Fully connected neural network with a single hidden layer of 64 units to #\n# to classify images of two digits # # #\n# ---------------------------------------------------------------------------------------------#\n\n\n\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom train_utils import batchify_data, run_epoch, train_model, Flatten\nimport utils_multiMNIST as U\npath_to_data_dir = '../Datasets/'\nuse_mini_dataset = True\n\nbatch_size = 64\nnb_classes = 10\nnb_epoch = 30\nnum_classes = 10\nimg_rows, img_cols = 42, 28 # Image dimensions\n\n\nclass MLP(nn.Module):\n\n def __init__(self, input_dimension):\n super(MLP, self).__init__()\n self.flatten = Flatten()\n self.linear1 = nn.Linear(input_dimension, 64)\n self.linear2 = nn.Linear(64, 20) # Output of 20 labels\n\n def forward(self, x):\n xf = self.flatten(x)\n xl1 = self.linear1(xf)\n xl2 = self.linear2(xl1)\n out_first_digit = xl2[:, :10]\n out_second_digit = xl2[:, 10:]\n\n return out_first_digit, out_second_digit\n\n\ndef main():\n X_train, y_train, X_test, y_test = U.get_data(\n path_to_data_dir, use_mini_dataset)\n\n # Split into train and dev\n dev_split_index = int(9 * len(X_train) / 10) \n X_dev = X_train[dev_split_index:]\n y_dev = [y_train[0][dev_split_index:], y_train[1][dev_split_index:]]\n X_train = X_train[:dev_split_index]\n y_train = [y_train[0][:dev_split_index], y_train[1][:dev_split_index]]\n\n permutation = np.array([i for i in range(len(X_train))])\n np.random.shuffle(permutation)\n X_train = [X_train[i] for i in permutation]\n y_train = [[y_train[0][i] for i in permutation],\n [y_train[1][i] for i in permutation]]\n\n # Split dataset into batches\n train_batches = batchify_data(X_train, y_train, batch_size)\n dev_batches = batchify_data(X_dev, y_dev, batch_size)\n test_batches = 
batchify_data(X_test, y_test, batch_size)\n\n # Load model\n input_dimension = img_rows * img_cols\n model = MLP(input_dimension) \n\n # Train\n train_model(train_batches, dev_batches, model)\n\n # Evaluate the model on test data\n loss, acc = run_epoch(test_batches, model.eval(), None)\n print('Test loss1: {:.6f} accuracy1: {:.6f} loss2: {:.6f} accuracy2: {:.6f}'.format(\n loss[0], acc[0], loss[1], acc[1]))\n\n\nif __name__ == '__main__':\n # Specify seed for deterministic behavior, then shuffle. Do not change seed for official submissions to edx\n np.random.seed(12321) # for reproducibility\n torch.manual_seed(12321) # for reproducibility\n main()\n" ]
[ [ "torch.nn.Linear", "torch.manual_seed", "numpy.random.seed", "numpy.random.shuffle" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
korawat-tanwisuth/Proto_DA
[ "332e6ed5814db98d33cd92842012e57298b631fb" ]
[ "examples/proto.py" ]
[ "import random\nimport time\nimport warnings\nimport sys\nimport argparse\nimport copy\n\nimport torch\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nfrom torch.optim import SGD\nimport torch.utils.data\nfrom torch.utils.data import DataLoader\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nsys.path.append('.')\nfrom dalib.adaptation.proto import ProtoLoss, ImageClassifier\nimport dalib.vision.datasets as datasets\nimport dalib.vision.models as models\nfrom tools.utils import AverageMeter, ProgressMeter, accuracy, ForeverDataIterator\nfrom tools.transforms import ResizeImage\nfrom tools.lr_scheduler import StepwiseLR\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef main(args: argparse.Namespace):\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! 
'\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n cudnn.benchmark = True\n\n # Data loading code\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n train_transform = transforms.Compose([\n ResizeImage(256),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize\n ])\n \n val_transform = transforms.Compose([\n ResizeImage(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize\n ])\n\n dataset = datasets.__dict__[args.data]\n train_source_dataset = dataset(root=args.root, task=args.source, download=True, transform=train_transform, subsample=args.sub_s)\n train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers, drop_last=True)\n train_target_dataset = dataset(root=args.root, task=args.target, download=True, transform=train_transform, subsample=args.sub_t)\n train_target_loader = DataLoader(train_target_dataset, batch_size=args.bs_tgt,\n shuffle=True, num_workers=args.workers, drop_last=True)\n val_dataset = dataset(root=args.root, task=args.target, download=True, transform=val_transform, subsample=args.sub_t)\n val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)\n if args.data == 'DomainNet':\n test_dataset = dataset(root=args.root, task=args.target, evaluate=True, download=True, transform=val_transform)\n test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)\n else:\n test_loader = val_loader\n\n train_source_iter = ForeverDataIterator(train_source_loader)\n train_target_iter = ForeverDataIterator(train_target_loader)\n\n # create model\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n backbone = models.__dict__[args.arch](pretrained=True)\n classifier = ImageClassifier(backbone, train_source_dataset.num_classes).to(device)\n\n # 
define loss function\n num_classes = train_source_dataset.num_classes\n domain_loss = ProtoLoss(args.nav_t, args.beta, num_classes, device, args.s_par).to(device)\n domain_loss.true_prop = torch.Tensor(train_target_dataset.proportion).unsqueeze(1).to(device)\n # define optimizer and lr scheduler\n optimizer = SGD(classifier.get_parameters(),\n args.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)\n lr_scheduler = StepwiseLR(optimizer, init_lr=args.lr, gamma=args.lr_gamma, decay_rate=0.75)\n beta_scheduler = StepwiseLR(None, init_lr=args.beta, gamma=args.lr_gamma, decay_rate=0.75)\n \n # start training\n best_acc1 = 0.\n for epoch in range(args.epochs):\n # train for one epoch\n train(train_source_iter, train_target_iter, classifier, domain_loss, optimizer,\n lr_scheduler, beta_scheduler, epoch, args)\n print(domain_loss.prop.squeeze(1).tolist())\n # evaluate on validation set\n acc1 = validate(val_loader, classifier, domain_loss, args)\n\n # remember best acc@1 and save checkpoint\n if acc1 > best_acc1:\n best_model = copy.deepcopy(classifier.state_dict())\n best_acc1 = max(acc1, best_acc1)\n\n print(\"best_acc1 = {:3.1f}\".format(best_acc1))\n\n # evaluate on test set\n classifier.load_state_dict(best_model)\n acc1 = validate(test_loader, classifier, domain_loss, args)\n print(\"test_acc1 = {:3.1f}\".format(acc1))\n\ndef train(train_source_iter: ForeverDataIterator, train_target_iter: ForeverDataIterator,\n model: ImageClassifier, domain_loss: ProtoLoss, optimizer: SGD,\n lr_scheduler: StepwiseLR, beta_scheduler: StepwiseLR, epoch: int, args: argparse.Namespace):\n batch_time = AverageMeter('Time', ':5.2f')\n data_time = AverageMeter('Data', ':5.2f')\n losses = AverageMeter('Loss', ':6.2f')\n transfer_losses = AverageMeter('Transfer Loss', ':6.2f')\n prop_losses = AverageMeter('Prop Loss', ':6.6f')\n cls_accs = AverageMeter('Cls Acc', ':3.1f')\n progress = ProgressMeter(\n args.iters_per_epoch,\n [batch_time, data_time, transfer_losses, 
prop_losses, losses, cls_accs],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n domain_loss.train()\n\n end = time.time()\n for i in range(args.iters_per_epoch):\n lr_scheduler.step()\n beta_scheduler.step()\n\n # measure data loading time\n data_time.update(time.time() - end)\n \n x_s, labels_s = next(train_source_iter)\n x_t, _ = next(train_target_iter)\n x_s = x_s.to(device)\n x_t = x_t.to(device)\n labels_s = labels_s.to(device)\n x_list = [x_s, x_t]\n \n combined_x = torch.cat(x_list, dim=0)\n y, f = model(combined_x)\n del x_list\n\n prototypes_s = model.head.weight.data.clone()\n\n f_chunks = torch.split(f, [args.batch_size] + [args.bs_tgt], dim=0) \n f_s, f_t = f_chunks[0], f_chunks[-1]\n y_chunks = torch.split(y, [args.batch_size] + [args.bs_tgt], dim=0)\n y_s, y_t = y_chunks[0], y_chunks[-1]\n \n cls_loss = F.cross_entropy(y_s, labels_s)\n \n domain_loss.beta = beta_scheduler.get_lr()\n transfer_loss = domain_loss(prototypes_s, f_t)\n loss = cls_loss + transfer_loss * args.trade_off \n prop_loss = torch.abs(domain_loss.true_prop - domain_loss.prop).mean()\n \n cls_acc = accuracy(y_s, labels_s)[0]\n\n prop_losses.update(prop_loss.item(), prototypes_s.size(0)) \n transfer_losses.update(transfer_loss.item(), y_s.size(0)) \n losses.update(loss.item(), y_s.size(0))\n cls_accs.update(cls_acc.item(), y_s.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n\ndef validate(val_loader: DataLoader, model: ImageClassifier, domain_loss: nn.Module, args: argparse.Namespace) -> float:\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(val_loader),\n [batch_time, losses, top1, top5],\n 
prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n images = images.to(device)\n target = target.to(device)\n\n # compute output\n y_t, f_t = model(images)\n loss = F.cross_entropy(y_t, target)\n\n mu_s = model.head.weight.data.clone()\n sim_mat = torch.matmul(mu_s, f_t.T)\n output = domain_loss.get_pos_logits(sim_mat, domain_loss.prop).T\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1.item(), images.size(0))\n top5.update(acc5.item(), images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n\n\nif __name__ == '__main__':\n architecture_names = sorted(\n name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name])\n )\n dataset_names = sorted(\n name for name in datasets.__dict__\n if not name.startswith(\"__\") and callable(datasets.__dict__[name])\n )\n\n parser = argparse.ArgumentParser(description='PyTorch Domain Adaptation')\n parser.add_argument('root', metavar='DIR',\n help='root path of dataset')\n parser.add_argument('-d', '--data', metavar='DATA', default='Office31',\n help='dataset: ' + ' | '.join(dataset_names) +\n ' (default: Office31)')\n parser.add_argument('-s', '--source', help='source domain(s)')\n parser.add_argument('-t', '--target', help='target domain(s)')\n parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=architecture_names,\n help='backbone architecture: ' +\n ' | '.join(architecture_names) +\n ' (default: resnet18)')\n parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',\n help='number of data loading workers 
(default: 4)')\n parser.add_argument('--epochs', default=20, type=int, metavar='N',\n help='number of total epochs to run')\n parser.add_argument('-b', '--batch-size', default=32, type=int,\n metavar='N',\n help='mini-batch size for source (default: 32)')\n parser.add_argument('--bs_tgt', default=96, type=int, \n help='target batch size')\n parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\n parser.add_argument('--lr-gamma', default=0.0002, type=float)\n parser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\n parser.add_argument('--wd', '--weight-decay',default=1e-3, type=float,\n metavar='W', help='weight decay (default: 1e-3)',\n dest='weight_decay')\n parser.add_argument('-p', '--print-freq', default=100, type=int,\n metavar='N', help='print frequency (default: 100)')\n parser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\n parser.add_argument('--trade-off', default=1., type=float,\n help='the trade-off hyper-parameter for transfer loss')\n parser.add_argument('-i', '--iters-per-epoch', default=1000, type=int,\n help='Number of iterations per epoch')\n parser.add_argument('-nav_t', '--nav_t', default=1, type=float,\n help='temperature for the navigator')\n parser.add_argument('-beta', '--beta', default=0, type=float,\n help='momentum coefficient')\n parser.add_argument('--s_par', default=0.5, type=float, \n help='s_par')\n parser.add_argument('--sub_s', default=False, action='store_true')\n parser.add_argument('--sub_t', default=False, action='store_true')\n\n\n\n args = parser.parse_args()\n print(args)\n main(args)\n\n" ]
[ [ "torch.abs", "torch.Tensor", "torch.cat", "torch.manual_seed", "torch.nn.functional.cross_entropy", "torch.utils.data.DataLoader", "torch.matmul", "torch.no_grad", "torch.cuda.is_available", "torch.split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GwynWilson/H2L
[ "201557dea156c8340108fecaa130ba3d3bac33df" ]
[ "PYH2L/TrainingSetCreator.py" ]
[ "import os\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom matplotlib.widgets import RectangleSelector, Cursor\n\nplt.switch_backend('QT5Agg')\npositive_response = ['Yes', 'yes', 'y', 'yeah', 'Yeah', 'Y']\n\n\"\"\"\nThis script allows the reading of the test images as arrays, splits the colour\nchannels, displays one of the channels using imshow. A draggable box can then\nbe drawn and the coordinates of two opposite corners of the rectangle (not sure\nwhich ones) are printed.\n\"\"\"\n\nix = -1\n\nlocal_repo_path = os.getcwd()\n\ninitial_check = input(\"Overwrite any pre-existing coordinates?\")\n\n\ndef func(filename):\n # Test images are now within repository so this line will set the correct directory as long as your local version is\n # up to date\n\n\n def func2(filename2=filename):\n os.chdir(\"Data/TestImages\")\n # Reading the image as an array.\n img = cv2.imread(\"{}.png\".format(filename2))\n # Using fig, ax to make the interactive bit work.\n fig, ax = plt.subplots()\n plt.imshow(img)\n\n coords = pd.DataFrame(columns=['blx', 'bly', 'trx', 'try'])\n\n def line_select_callback(eclick, erelease):\n global ix\n ix += 1\n x1, y1 = eclick.xdata, eclick.ydata\n x2, y2 = erelease.xdata, erelease.ydata\n\n rect = plt.Rectangle((min(x1, x2), min(y1, y2)), np.abs(x1 - x2), np.abs(y1 - y2), alpha=0.3)\n coords.at[ix, 'blx'] = int(x1)\n coords.at[ix, 'bly'] = int(y1)\n coords.at[ix, 'trx'] = int(x2)\n coords.at[ix, 'try'] = int(y2)\n print(ix)\n ax.add_patch(rect)\n\n rs = RectangleSelector(ax, line_select_callback,\n drawtype='box', useblit=False, button=[1],\n minspanx=5, minspany=5, spancoords='pixels',\n interactive=True)\n\n fig_manager = plt.get_current_fig_manager()\n fig_manager.window.showMaximized()\n cursor = Cursor(ax, useblit=True, color='red', linewidth=1)\n plt.show()\n os.chdir(local_repo_path + '\\Data\\TestCoords')\n if not coords.empty:\n coords.to_csv('{}.csv'.format(filename2), index=False)\n\n if 
filename + '.csv' in os.listdir(local_repo_path + \"\\Data\\TestCoords\"):\n check = input(\"Coordinate data for this image ({}) is already present. Overwrite pre-existing coordinates?\".format(filename))\n if check in positive_response:\n return func2()\n else:\n os.chdir(local_repo_path)\n pass\n else:\n os.chdir(local_repo_path)\n return func2()\n\n\n# func('Layer 2')\n\n\nfiles = [os.path.splitext(filename)[0] for filename in os.listdir(local_repo_path + \"\\Data\\TestImages\")]\nfor names in files:\n if names + '.csv' in os.listdir(local_repo_path + \"\\Data\\TestCoords\"):\n if initial_check not in positive_response:\n continue\n func(names)\n\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.abs", "matplotlib.widgets.Cursor", "matplotlib.pyplot.switch_backend", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.get_current_fig_manager", "matplotlib.widgets.RectangleSelector", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
xindi-dumbledore/NetInfoAccEqua
[ "f4b52166afb5504566b6e6bcfcfb9d01612ce30a" ]
[ "codes/simulation_param.py" ]
[ "import numpy as np\nM, E, N = 0.2, 2, 5000\nH, ALPHA = 0.8, 1\nPD, ED = 0.6, 1\n\nN_TRIALS = 10\nN_GRAPHS = 20\nBETA_ARRAY_SYM = np.array([[0.7, 0.7], [0.7, 0.7]])\nBETA_ARRAY_ASY = np.array([[0.7, 0.3], [0.3, 0.7]])\nGAMMA = 0.1\nSEED_NUM = 10\n\nMINORITY_SEEDING_PORTION_DICT = {\n \"low\": [0, 0.3], \"mid\": [0.3, 0.7], \"high\": [0.7, 1]}\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jsandersen/MRTviaHIL
[ "eaed679d014183b9b5bc4846db543c64e80592c8" ]
[ "src/model/stats.py" ]
[ "import numpy as np\nimport pandas as pd\n\nfrom matplotlib import pyplot as plt\n\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.utils import to_categorical\nfrom sklearn.metrics import f1_score, roc_auc_score\n\nfrom tensorflow.keras.utils import to_categorical\n\n\nfrom timeit import default_timer as timer\n\nfrom random import random, randint, seed\n\nfrom tqdm import tqdm\nimport math\n\nfrom scipy.stats import entropy\n\ndef _round_05(x):\n y = math.floor(x*10)/10\n if y + 0.05 <= x:\n y += 0.05\n return round(y, 3)\n\ndef brier_multi(targets, probs):\n return np.mean(np.sum((probs - targets)**2, axis=1))\n\ndef predict(Model, X, y, skl_model=False, batch_size=None, epochs=None, callbacks=None, val=False, prob=True, save=None, load=None, **kwargs): \n \n dfs = []\n f1_micro_list = []\n f1_macro_list = [] \n auc_roc_list = []\n brier_list = []\n training_time = []\n inference_time = []\n it = 0\n \n sss = StratifiedShuffleSplit(n_splits=5,test_size=0.50, random_state=42) # 5\n \n for train_index, test_index in tqdm(sss.split(X, y), position=0, leave=True):\n \n clf = Model(**kwargs)\n \n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n \n if skl_model:\n train_start = timer()\n clf.fit(X_train, y_train)\n train_end = timer()\n else:\n # fit\n \n if load:\n train_end = -1\n train_start = 0\n clf.load_model('./model/%s_%s' % (load, it))\n else:\n if val:\n train_start = timer()\n clf.fit(X_train, y_train, X_test, y_test, batch_size, epochs, callbacks, 0.1)\n train_end = timer()\n else :\n train_start = timer()\n clf.fit(X_train, y_train, batch_size, epochs, callbacks, 0)\n train_end = timer() \n if save:\n clf.save_model('./model/%s_%s' % (save, it))\n \n training_time.append(train_end - train_start)\n it = it + 1\n \n # infer\n if prob:\n inference_start = timer()\n y_prob = clf.predict_proba(X_test)\n inference_end = 
timer()\n inference_time.append(inference_end - inference_start)\n y_pred = y_prob.argmax(axis=1)\n else:\n inference_start = timer()\n uncertainty = clf.uncertainty(X_test)\n inference_end = timer()\n if len(np.unique(y)) > 2:\n y_pred = uncertainty.argmax(axis=1)\n uncertainty = uncertainty.max(axis=1)\n else: \n y_pred = [0 if i < 0 else 1 for i in uncertainty]\n inference_time.append(inference_end - inference_start)\n \n # evaluate \n f1_micro_list.append(f1_score(y_test, y_pred, average='micro'))\n f1_macro_list.append(f1_score(y_test, y_pred, average='macro'))\n brier_list.append(brier_multi(np.array(to_categorical(y_test)), np.array(y_prob)))\n \n if prob:\n if len(np.unique(y)) == 2:\n auc_roc_list.append(roc_auc_score(y_test, y_prob[:,1]))\n else:\n auc_roc_list.append(roc_auc_score(y_test, y_prob, multi_class='ovr'))\n\n y_prob = [np.array(x) for x in y_prob]\n # save as dataframe\n data = {'y_pred': y_pred, 'y_prob': y_prob, 'y_true': y_test, 'unc' : entropy(y_prob, base=2, axis=1) if prob else uncertainty}\n df = pd.DataFrame(data=data)\n df_sort = df.sort_values('unc', ascending=prob).reset_index()\n \n dfs.append(df_sort)\n \n break\n \n f1_micro_list = np.array(f1_micro_list)\n f1_macro_list = np.array(f1_macro_list)\n brier_list = np.array(brier_list)\n auc_roc_list = np.array(auc_roc_list)\n \n training_time = np.array(training_time)\n inference_time = np.array(inference_time)\n print(' ')\n \n print('Model Performance (Mean/Std)')\n print('f1_micro: ', round(f1_micro_list.mean(), 4), round(f1_micro_list.std(), 4))\n print('f1_macro: ', round(f1_macro_list.mean(), 4), round(f1_macro_list.std(), 4))\n print('auc_roc: ', round(auc_roc_list.mean(), 4), round(auc_roc_list.std(), 4))\n \n print(' ')\n print('brier score: ', round(brier_list.mean(), 4), round(brier_list.std(), 4))\n print(' ')\n \n print('Time (sec) (Mean/Std)')\n print('training: ', round(training_time.mean(), 4), round(training_time.std(), 4))\n print('inference: ', 
round(inference_time.mean(), 4), round(inference_time.std(), 4))\n \n print(' ')\n return dfs\n\ndef machine_f1(dfs):\n res = []\n del_rate = [0.0, 0.1, 0.2, 0.3]\n for i in del_rate:\n res_i = []\n for df in dfs:\n size = int(len(df['y_true'])*i)\n if i == 0:\n res_i.append((f1_score(df['y_true'].values, df['y_pred'].values, average='macro')))\n else:\n res_i.append((f1_score(df['y_true'][:-size].values, df['y_pred'][:-size].values, average='macro')))\n res.append(res_i)\n \n res = np.array(res).mean(axis=1)\n growth = [0]\n for i in range(1, len(res)):\n growth.append(( (res[i] - res[i-1]) / res[i-1]))\n \n print('Model Performance (deletion rate / macro-f1 / groth)')\n for i in range(len(res)):\n print(\"%s: %s (+%s) \" % (del_rate[i], round(res[i], 4), round(growth[i], 4)) )\n print('')\n\ndef compute_moderation_effort(dfs, p_oracle = 1):\n print('p_oracle: ', p_oracle)\n y_gold = None\n \n if p_oracle < 1:\n y_gold = []\n for df in dfs:\n y_true = df['y_true'].values\n n_labels = len(set(y_true))\n\n human_labells = []\n for i in y_true:\n irand = random()\n if (irand > p_oracle):\n human_labells.append(randint(0, n_labels-1))\n else:\n human_labells.append(i)\n\n y_gold.append(human_labells)\n \n mod_efforts = []\n j = 0\n for df in dfs:\n mod_effort = []\n y_true = df['y_true'].values\n \n\n y_pred = df['y_pred'].values\n for i in tqdm(range(len(y_true)), position=0, leave=True):\n ai = y_pred[:len(y_true)-i]\n if not y_gold:\n human = y_true[len(ai):]\n else:\n human = y_gold[j][len(ai):]\n mod_y = np.concatenate([ai, human])\n f1 = f1_score(y_true, mod_y, average='macro')\n mod_effort.append(f1)\n mod_efforts.append(mod_effort)\n j = j +1\n return mod_efforts\n\ndef eval_moderation_effort(mod_efforts):\n fontsize = 20\n \n mod_effort_mean = np.array(mod_efforts).mean(axis=0)\n mod_effort_std = np.array(mod_efforts).std(axis=0)\n\n mod_effort = np.array(mod_efforts).mean(axis=0)\n plt.plot(mod_effort, label='Uncertainty')\n plt.plot([0, len(mod_effort)], 
[mod_effort[0], 1], 'black', linestyle='dashed', label='Random')\n plt.fill_between(range(len(mod_effort)), mod_effort_mean-mod_effort_std, mod_effort_mean+mod_effort_std, alpha=.3)\n plt.xlim((0, len(mod_effort)))\n plt.yticks(np.arange(_round_05(mod_effort_mean[0]), 1.05, 0.05), fontsize=fontsize)\n plt.ylabel('F1-Score', fontsize=fontsize)\n plt.xlabel('Moderation Effort', fontsize=fontsize)\n plt.xticks(np.arange(0, len(mod_effort)+1, len(mod_effort)/5), ['0%', '20%', '40%', '60%', '80%', '100%'], fontsize=fontsize)\n plt.ylim((mod_effort[0], 1.0005))\n l = plt.legend(frameon=True, fontsize=fontsize, title=\"Moderation Strategy\", fancybox=True)\n plt.setp(l.get_title(),fontsize=18)\n plt.show()\n\n print('Moderation Effort Needed')\n \n for i in [.81, .83, .85, .87, .89, .91, .93, .95, .97, .99]:\n try:\n print(i, 'f1 =>', round(np.where(mod_effort_mean>=i)[0][0]/len(mod_effort_mean), 4), 'effort')\n except IndexError:\n print(np.array(mod_effort_mean).max())\n break;\n \ndef print_all_stats(dfs, path):\n i = 0\n for df in dfs:\n df.to_pickle(\"./dfs/df_%s_%s.pkl\" % (path, i))\n i = i + 1\n \n machine_f1(dfs)\n mod_efforts = compute_moderation_effort(dfs)\n np.save('./dfs/mod_100_%s' % path, mod_efforts)\n \n eval_moderation_effort(mod_efforts)\n mod_efforts = compute_moderation_effort(dfs, 0.95)\n np.save('./dfs/mod_95_%s' % path, mod_efforts)\n \n eval_moderation_effort(mod_efforts) \n mod_efforts = compute_moderation_effort(dfs, 0.9)\n np.save('./dfs/mod_90_%s' % path, mod_efforts)\n \n eval_moderation_effort(mod_efforts)\n \n print(' ')\n print('#############################')\n print(' ')\n " ]
[ [ "matplotlib.pyplot.legend", "sklearn.metrics.roc_auc_score", "numpy.sum", "numpy.unique", "matplotlib.pyplot.ylim", "numpy.save", "pandas.DataFrame", "matplotlib.pyplot.plot", "numpy.concatenate", "tensorflow.keras.utils.to_categorical", "scipy.stats.entropy", "matplotlib.pyplot.xlabel", "sklearn.metrics.f1_score", "numpy.array", "numpy.where", "sklearn.model_selection.StratifiedShuffleSplit", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
yujiatay/deep-motion-editing
[ "19604abdc0ead66f8c82d9211b8c5862c6a68089" ]
[ "options/options.py" ]
[ "import argparse\nimport torch\nimport models\n\n\nclass Options():\n\n def __init__(self):\n self.initialized = False\n\n def initialize(self, parser):\n parser.add_argument('--input_A', required=True, help='path to first bvh file')\n parser.add_argument('--input_B', required=True, help='path to second bvh file')\n parser.add_argument('--model_path', required=True, help='path to second bvh file')\n parser.add_argument('--edit_type', type=str, default='retargeting', help='name of the motion editing operation, retargeting or style_transfer')\n parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')\n\n return parser\n\n def gather_options(self):\n\n if not self.initialized: # check if it has been initialized\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser = self.initialize(parser)\n\n # get the basic options\n opt, _ = parser.parse_known_args()\n\n # modify model-related parser options\n model_name = opt.model\n model_option_setter = models.get_option_setter(model_name)\n parser = model_option_setter(parser, self.isTrain)\n opt, _ = parser.parse_known_args() # parse again with new defaults\n\n # modify dataset-related parser options\n dataset_name = opt.dataset_mode\n dataset_option_setter = data.get_option_setter(dataset_name)\n parser = dataset_option_setter(parser, self.isTrain)\n\n # save and return the parser\n self.parser = parser\n return parser.parse_args()\n\n def print_options(self, opt):\n message = ''\n message += '----------------- Options ---------------\\n'\n for k, v in sorted(vars(opt).items()):\n comment = ''\n default = self.parser.get_default(k)\n if v != default:\n comment = '\\t[default: %s]' % str(default)\n message += '{:>25}: {:<30}{}\\n'.format(str(k), str(v), comment)\n message += '----------------- End -------------------'\n print(message)\n\n def parse(self):\n opt = self.gather_options()\n opt.isTrain = self.isTrain # train or 
test\n\n # process opt.suffix\n if opt.suffix:\n suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''\n opt.name = opt.name + suffix\n\n self.print_options(opt)\n\n # set gpu ids\n str_ids = opt.gpu_ids.split(',')\n opt.gpu_ids = []\n for str_id in str_ids:\n id = int(str_id)\n if id >= 0:\n opt.gpu_ids.append(id)\n if len(opt.gpu_ids) > 0:\n torch.cuda.set_device(opt.gpu_ids[0])\n\n self.opt = opt\n return self.opt\n" ]
[ [ "torch.cuda.set_device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
abonaca/spur_rv
[ "5b90a4b62732f9c6941fc053e4cec0f9e90fb3e4" ]
[ "scripts/vel.py" ]
[ "import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nfrom astropy.table import Table\nimport astropy.units as u\nimport astropy.coordinates as coord\nfrom astropy.io import ascii, fits\nimport astropy.table\n\n#from pyia import GaiaData\nimport gala.coordinates as gc\nimport glob\nimport pickle\n\n#from spec import *\n#from spec import get_date\nwangle = 180*u.deg\n\ndef gd1_dist(phi1):\n # 0, 10\n # -60, 7\n m = (10-7) / (60)\n return (m*phi1.wrap_at(180*u.deg).value + 10) * u.kpc\n\ndef show_vel():\n \"\"\"\"\"\"\n t4 = Table.read('../data/gd1_4_vels.tab', format='ascii.commented_header', delimiter='\\t')\n t5 = Table.read('../data/gd1_5_vels.tab', format='ascii.commented_header', delimiter='\\t')\n \n r55 = t5['rank']==5\n r45 = t4['rank']==5\n print(np.median(t5['VELOCITY'][r55]))\n print(np.median(t4['VELOCITY'][r45]))\n \n deltav = np.median(t4['VELOCITY'][r45]) - np.median(t5['VELOCITY'][r55])\n t4['VELOCITY'] -= deltav\n print(np.median(t5['VELOCITY'][r55]))\n print(np.median(t4['VELOCITY'][r45]))\n \n r51 = t5['rank']==1\n r41 = t4['rank']==1\n \n print(np.median(t5['VELOCITY'][r51]))\n print(np.median(t4['VELOCITY'][r41]))\n \n vbins = np.linspace(-200,200,50)\n \n plt.close()\n plt.figure()\n \n plt.hist(t4['VELOCITY'], bins=vbins, histtype='step', color='navy')\n plt.hist(t5['VELOCITY'], bins=vbins, histtype='step', color='orange')\n \n plt.hist(t4['VELOCITY'][r41], bins=vbins, histtype='stepfilled', alpha=0.2, color='navy')\n plt.hist(t5['VELOCITY'][r51], bins=vbins, histtype='stepfilled', alpha=0.2, color='orange')\n \n \n plt.tight_layout()\n\ndef spur_vel():\n \"\"\"\"\"\"\n\n trank = Table.read('/home/ana/observing/Hectochelle/2019A/xfitfibs/gd1_catalog.cat', format='ascii.fixed_width_two_line', delimiter='\\t')\n t1 = Table.read('../data/gd1_1_vels.tab', format='ascii.commented_header', delimiter='\\t')\n 
good = t1['CZXCR']>3.5\n t1 = t1[good]\n rank1 = trank['rank'][t1['object']]\n r11 = rank1==1\n r1s = rank1<4\n\n t2 = Table.read('../data/gd1_2_vels.tab', format='ascii.commented_header', delimiter='\\t')\n good = t2['CZXCR']>3.5\n t2 = t2[good]\n rank2 = trank['rank'][t2['object']]\n r21 = rank2==1\n r2s = rank2<4\n\n t4 = Table.read('../data/gd1_4_vels.tab', format='ascii.commented_header', delimiter='\\t')\n good = t4['CZXCR']>3.5\n t4 = t4[good]\n r41 = t4['rank']==1\n r4s = t4['rank']<4\n\n vbins = np.linspace(-200,200,100)\n\n plt.close()\n plt.figure()\n \n plt.hist(t4['VELOCITY'][r4s], bins=vbins, histtype='step', label='GD-1')\n plt.hist(t1['VELOCITY'][r1s], bins=vbins, histtype='step', label='Spur')\n plt.hist(t2['VELOCITY'][r2s], bins=vbins, histtype='step', label='Spur 2')\n \n plt.xlabel('Radial velocity [km s$^{-1}$]')\n plt.ylabel('Number')\n \n plt.legend()\n plt.tight_layout()\n\ndef rv_ra(flag=True, verbose=False):\n \"\"\"\"\"\"\n \n trank = Table.read('/home/ana/observing/Hectochelle/2019A/xfitfibs/gd1_catalog.cat', format='ascii.fixed_width_two_line', delimiter='\\t')\n t1 = Table.read('../data/gd1_1_vels.tab', format='ascii.commented_header', delimiter='\\t')\n if flag:\n good = t1['CZXCR']>3.5\n t1 = t1[good]\n rank1 = trank['rank'][t1['object']]\n r11 = rank1==1\n r1s = rank1<4\n\n t2 = Table.read('../data/gd1_2_vels.tab', format='ascii.commented_header', delimiter='\\t')\n if flag:\n good = t2['CZXCR']>3.5\n t2 = t2[good]\n rank2 = trank['rank'][t2['object']]\n r21 = rank2==1\n r2s = rank2<4\n\n t4 = Table.read('../data/gd1_4_vels.tab', format='ascii.commented_header', delimiter='\\t')\n if flag:\n good = t4['CZXCR']>3.5\n t4 = t4[good]\n r41 = t4['rank']==1\n r4s = t4['rank']<4\n \n kop_vr = ascii.read(\"\"\"phi1 phi2 vr err\n-45.23 -0.04 28.8 6.9\n-43.17 -0.09 29.3 10.2\n-39.54 -0.07 2.9 8.7\n-39.25 -0.22 -5.2 6.5\n-37.95 0.00 1.1 5.6\n-37.96 -0.00 -11.7 11.2\n-35.49 -0.05 -50.4 5.2\n-35.27 -0.02 -30.9 12.8\n-34.92 -0.15 -35.3 
7.5\n-34.74 -0.08 -30.9 9.2\n-33.74 -0.18 -74.3 9.8\n-32.90 -0.15 -71.5 9.6\n-32.25 -0.17 -71.5 9.2\n-29.95 -0.00 -92.7 8.7\n-26.61 -0.11 -114.2 7.3\n-25.45 -0.14 -67.8 7.1\n-24.86 0.01 -111.2 17.8\n-21.21 -0.02 -144.4 10.5\n-14.47 -0.15 -179.0 10.0\n-13.73 -0.28 -191.4 7.5\n-13.02 -0.21 -162.9 9.6\n-12.68 -0.26 -217.2 10.7\n-12.55 -0.23 -172.2 6.6\"\"\")\n \n cg = gc.GD1(kop_vr['phi1']*u.deg, kop_vr['phi2']*u.deg)\n ceq = cg.transform_to(coord.ICRS)\n \n # model\n pkl = pickle.load(open('/home/ana/projects/gd1_spur/data/fiducial.pkl', 'rb'))\n cmg = pkl['cg']\n cmeq = cmg.transform_to(coord.ICRS)\n \n plt.close()\n plt.figure(figsize=(8,6))\n \n #plt.plot(t1['rad'][r1s], t1['VELOCITY'][r1s], 'ko')\n #plt.plot(t2['rad'][r2s], t2['VELOCITY'][r2s], 'ko')\n #plt.plot(t4['rad'][r4s], t4['VELOCITY'][r4s], 'ko')\n \n if verbose:\n print(np.median(t4['VELOCITY'][r41]), np.median(t1['VELOCITY'][r11]), np.median(t2['VELOCITY'][r21]))\n print(np.std(t4['VELOCITY'][r41]), np.std(t1['VELOCITY'][r11]), np.std(t2['VELOCITY'][r21]))\n print(t1['VELOCITY'][r11])\n\n plt.plot(ceq.ra, kop_vr['vr'], 'o', color='0.5', label='GD-1 (Koposov)')\n plt.plot(t4['rad'][r41], t4['VELOCITY'][r41], 'wo', mec='k', label='GD-1 (hecto)')\n\n plt.plot(t1['rad'][r11], t1['VELOCITY'][r11], 'ko', label='Spur (hecto)')\n plt.plot(t2['rad'][r21], t2['VELOCITY'][r21], 'ko', label='')\n \n plt.plot(cmeq.ra, cmeq.radial_velocity, 'ro', ms=1, label='Model (Bonaca+2018)')\n \n plt.xlabel('R.A. 
[deg]')\n plt.ylabel('Radial velocity [km s$^{-1}$]')\n plt.legend(frameon=False, fontsize='small')\n \n plt.ylim(-250,100)\n plt.xlim(140,180)\n \n plt.tight_layout()\n plt.savefig('../plots/gd1_rv_ra_{:d}.png'.format(flag))\n\n\ndef payne_info():\n \"\"\"Explore Payne results\"\"\"\n \n t = Table.read('../data/GD1_TPv1.0.dat', format='ascii')\n #t.pprint()\n \n id_done = np.int64(t['star'])\n \n tin = Table.read('../data/gd1_input_catalog.fits')\n tin = tin[id_done]\n \n #print(tin.colnames, t.colnames)\n verr = 0.5*(t['Vrad_lerr'] + t['Vrad_uerr'])\n \n plt.close()\n plt.figure(figsize=(10,6))\n \n plt.plot(tin['g'], verr, 'ko')\n\n plt.xlabel('g [mag]')\n plt.ylabel('$V_{err}$ [km s$^{-1}$]')\n \n plt.ylim(0.001,10)\n plt.gca().set_yscale('log')\n \n plt.tight_layout()\n #plt.savefig('../plots/payne_verr.png')\n\ndef verr_corr():\n \"\"\"Correlations of radial velocity uncertainty with other stellar parameters\"\"\"\n \n t = Table.read('../data/GD1_TPv1.0.dat', format='ascii')\n \n id_done = np.int64(t['star'])\n \n tin = Table.read('../data/gd1_input_catalog.fits')\n tin = tin[id_done]\n \n verr = 0.5*(t['Vrad_lerr'] + t['Vrad_uerr'])\n \n plt.close()\n plt.figure(figsize=(10,6))\n \n #plt.plot(t['Vrot'], verr, 'ko')\n im = plt.scatter(tin['g'], verr, c=t['Vrot'], s=60, vmin=0, vmax=50, cmap=mpl.cm.viridis)\n\n plt.xlabel('g [mag]')\n plt.ylabel('$\\sigma_{V_{rad}}$ [km s$^{-1}$]')\n \n plt.ylim(0.001,20)\n plt.gca().set_yscale('log')\n \n divider = make_axes_locatable(plt.gca())\n cax = divider.append_axes(\"right\", size=\"3%\", pad=0.1)\n plt.colorbar(im, cax=cax) #, ticks=np.arange(0,51,25))\n plt.ylabel('$V_{rot}$ [km s$^{-1}$]')\n \n plt.tight_layout()\n plt.savefig('../plots/payne_verr.png')\n\ndef rv_catalog():\n \"\"\"Generate a radial velocity catalog of likely GD-1 members\"\"\"\n \n wangle = 180*u.deg\n \n trank = Table.read('/home/ana/observing/Hectochelle/2019A/xfitfibs/gd1_catalog.cat', format='ascii.fixed_width_two_line', delimiter='\\t')\n\n 
tlit = Table.read('/home/ana/projects/vision/data/koposov_vr.dat', format='ascii.commented_header')\n cg = gc.GD1(tlit['phi1']*u.deg, tlit['phi2']*u.deg)\n ceq = cg.transform_to(coord.ICRS)\n \n ra = ceq.ra\n dec = ceq.dec\n phi1 = cg.phi1.wrap_at(wangle)\n phi2 = cg.phi2\n vr = tlit['vr']*u.km/u.s\n vre = tlit['err']*u.km/u.s\n ref = np.zeros(len(tlit))\n \n for e, ind in enumerate([1,2,4]):\n tin = Table.read('../data/gd1_{:1d}_vels.tab'.format(ind), format='ascii.commented_header', delimiter='\\t')\n #if ind>=4:\n #rank = tin['rank']\n #xcr = 3\n #else:\n rank = trank['rank'][tin['object']]\n xcr = 3.5\n \n keep = (tin['CZXCR']>xcr) & (rank<4)\n #print(ind, np.sum(keep))\n #if ind==5:\n #tin[rank<4].pprint()\n tin = tin[keep]\n c_ = coord.ICRS(ra=tin['rad']*u.deg, dec=tin['decd']*u.deg)\n cg_ = c_.transform_to(gc.GD1)\n \n ra = np.concatenate([ra, tin['rad']])\n dec = np.concatenate([dec, tin['decd']])\n phi1 = np.concatenate([phi1, cg_.phi1.wrap_at(wangle)])\n phi2 = np.concatenate([phi2, cg_.phi2])\n vr = np.concatenate([vr, tin['VELOCITY']])\n if ind!=5:\n vre = np.concatenate([vre, tin['CZXCERR']])\n else:\n vre = np.concatenate([vre, np.ones(len(tin))])\n ref = np.concatenate([ref, np.ones(len(tin))*ind])\n \n tout = Table([ra, dec, phi1, phi2, vr, vre, ref], names=('ra', 'dec', 'phi1', 'phi2', 'vr', 'vre', 'ref'))\n tout.pprint()\n \n tout.write('../data/gd1_vr_2019.fits', overwrite=True)\n\ndef rv_map():\n \"\"\"\"\"\"\n wangle = 180*u.deg\n cmodel = mpl.cm.Blues(0.9)\n cmodel = '0.5'\n \n pkl = pickle.load(open('/home/ana/projects/gd1_spur/data/fiducial_perturb_python3.pkl', 'rb'))\n cmg = pkl['cg']\n c1 = pkl['cg']\n \n pkl0 = pickle.load(open('/home/ana/projects/gd1_spur/data/fiducial_noperturb_python3.pkl', 'rb'))\n cmg0 = pkl0['cg']\n c0 = pkl0['cg']\n \n # polynomial fit to the track\n ind = (cmg0.phi1.wrap_at(wangle)<0*u.deg) & (cmg0.phi1.wrap_at(wangle)>-80*u.deg)\n prv = np.polyfit(cmg0.phi1.wrap_at(wangle)[ind], cmg0.radial_velocity[ind], 3)\n 
    polyrv = np.poly1d(prv)
    
    # cubic fits to the model proper motions (not plotted below, kept for reference)
    pmu1 = np.polyfit(c0.phi1.wrap_at(180*u.deg)[ind], c0.pm_phi1_cosphi2[ind].to(u.mas/u.yr), 3)
    polymu1 = np.poly1d(pmu1)
    
    pmu2 = np.polyfit(c0.phi1.wrap_at(180*u.deg)[ind], c0.pm_phi2[ind].to(u.mas/u.yr), 3)
    polymu2 = np.poly1d(pmu2)
    
    # members
    tmem = Table.read('/home/ana/data/gd1-better-selection.fits')
    #print(tmem.colnames)
    
    # rv observations
    #trv = Table.read('/home/ana/projects/vision/data/gd1_vr.fits')
    trv = Table.read('../data/gd1_vr_2019.fits')
    #print(np.array(trv['phi2']))
    
    plt.close()
    fig, ax = plt.subplots(2,1,figsize=(10,5.5), sharex=True)
    
    # top panel: stream track on the sky
    plt.sca(ax[0])
    #plt.plot(cmg.phi1.wrap_at(wangle), cmg.phi2, '.', color=cmodel, ms=2)
    plt.plot(tmem['phi1'], tmem['phi2'], 'k.', ms=2.5, label='Observed GD-1')
    #plt.scatter(tmem['phi1'], tmem['phi2'], s=tmem['stream_prob']*2, c=tmem['stream_prob'], cmap=mpl.cm.binary, vmin=0.5, vmax=1.1, rasterized=True)
    #plt.plot(trv['phi1'], trv['phi2'], 'wo', mec='k')
    
    plt.xlim(-70, -10)
    plt.ylim(-6,6)
    plt.ylabel('$\phi_2$ [deg]')
    
    
    # bottom panel: radial-velocity residuals from the model-track polynomial
    plt.sca(ax[1])
    plt.plot(cmg.phi1.wrap_at(wangle), cmg.radial_velocity - polyrv(cmg.phi1.wrap_at(wangle))*u.km/u.s, '.', color=cmodel, ms=2, label='GD-1 model')
    #plt.plot(trv['phi1'], trv['vr'] - polyrv(trv['phi1']), 'wo', mec='k')
    #plt.errorbar(trv['phi1'], trv['vr'] - polyrv(trv['phi1']), yerr=trv['vre'], fmt='none', color='k', mec='k')
    
    plt.ylim(-30,30)
    plt.xlabel('$\phi_1$ [deg]')
    plt.ylabel('$\Delta$ $V_r$ [km s$^{-1}$]')
    
    # per-reference plotting styles (index matches np.unique(trv['ref']))
    colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red']
    colors = ['steelblue', mpl.cm.Oranges(0.4), mpl.cm.Oranges(0.8), mpl.cm.Oranges(0.6)]
    msize = [5, 7, 7, 7]
    markers = ['o', 'D', 'D', 'D']
    labels = ['Koposov et al. (2010)', '', '', 'Hectochelle 2019A']
    
    for e,i in enumerate(np.unique(trv['ref'])[:]):
        ind = trv['ref']==i
        color = colors[e]
        ms = msize[e]
        marker = markers[e]
        
        plt.sca(ax[0])
        plt.plot(trv['phi1'][ind], trv['phi2'][ind], marker, ms=ms, color=color, label='')
        
        plt.sca(ax[1])
        plt.plot(trv['phi1'][ind], trv['vr'][ind] - polyrv(trv['phi1'][ind]), marker, ms=ms, color=color, label=labels[e])
        plt.errorbar(trv['phi1'][ind], trv['vr'][ind] - polyrv(trv['phi1'][ind]), yerr=trv['vre'][ind], fmt='none', color=color, label='')
    
    plt.sca(ax[0])
    plt.legend(loc=2, fontsize='small', handlelength=0.4)
    
    plt.sca(ax[1])
    plt.legend(loc=2, fontsize='small', handlelength=0.4)
    
    plt.tight_layout(h_pad=0)
    #plt.savefig('/home/ana/proposals/CfA2019B/hecto_gd1spur/plots/vr_map.pdf')
    plt.savefig('../plots/vr_map.pdf')


## field diagnostics

def field_stats():
    """Print per-field, spur and stream medians and scatter of the velocity
    residual dvr, with bootstrap uncertainties; saves histograms of the
    bootstrap moments to ../plots/stream_spur_dvr_moments.png."""
    t = Table.read('../data/gd1_chelle.fits')
    ind = (t['rank']<4) & (np.abs(t['dvr'])<20) & (t['dvr']<2)
    t = t[ind]
    
    for e, f in enumerate(np.unique(t['field'])):
        ind = t['field'] ==f
        t_ = t[ind]
        print('{} {:.1f} {:.1f}'.format(f, np.median(t_['dvr']), np.std(t_['dvr'])))
    
    # fields 2..6 form the spur sample; everything else is the stream
    ind = (t['field']>=2) & (t['field']<=6)
    t_ = t[ind]
    print('spur {:.1f} {:.1f}'.format(np.median(t_['dvr']), np.std(t_['dvr'])))
    
    t_ = t[~ind]
    print('stream {:.1f} {:.1f}'.format(np.median(t_['dvr']), np.std(t_['dvr'])))
    
    # bootstrap uncertainties
    np.random.seed(477428)
    N = len(t)
    Nsample = 1000
    # resample each star's dvr with unit-variance Gaussian noise, Nsample times
    dvr = np.random.randn(N*Nsample).reshape(N, Nsample) + t['dvr'][:,np.newaxis]
    
    dvr_ = dvr[ind]
    med_spur = np.median(dvr_, axis=0)
    std_spur = np.std(dvr_, axis=0)
    print('spur bootstrap {:.1f} +- {:.1f}, {:.2f} +- {:.2f}'.format(np.median(med_spur), np.std(med_spur), np.median(std_spur), np.std(std_spur)))
    
    dvr_ = dvr[~ind]
    med_stream = np.median(dvr_, axis=0)
    std_stream = np.std(dvr_, axis=0)
    # NOTE(review): format string appears truncated in this copy (2 placeholders,
    # 4 arguments) -- Python ignores the extra args, but confirm against the original.
    print('stream bootstrap {:.1f} +- {:.2f}'.format(np.median(med_stream), np.std(med_stream), np.median(std_stream), np.std(std_stream)))
    
    bins_med = np.linspace(-12,-3,30)
    bins_std = np.linspace(0,6,30)
    
    plt.close()
    fig, ax = plt.subplots(1,2,figsize=(10,5))
    
    plt.sca(ax[0])
    plt.hist(med_spur, bins=bins_med, alpha=0.3, density=True)
    plt.hist(med_stream, bins=bins_med, alpha=0.3, density=True)
    
    plt.xlabel('Median $\Delta V_r$ [km s$^{-1}$]')
    plt.ylabel('Density')
    
    plt.sca(ax[1])
    plt.hist(std_spur, bins=bins_std, alpha=0.3, density=True, label='Spur')
    plt.hist(std_stream, bins=bins_std, alpha=0.3, density=True, label='Stream')
    
    plt.xlabel('STD $\Delta V_r$ [km s$^{-1}$]')
    plt.ylabel('Density')
    plt.legend(frameon=False, fontsize='small', loc=2, handlelength=1.5)
    
    plt.tight_layout()
    plt.savefig('../plots/stream_spur_dvr_moments.png', dpi=200)

def priorities():
    """Plot a CMD color-coded by rank"""
    
    t = Table.read('../data/gd1_chelle.fits')
    for r in np.unique(t['rank']):
        print(r, np.sum(t['rank']==r))
    
    # split targeted (rank<5) stars from the rest, plotted as background
    ind = t['rank']<5
    t5 = t[~ind]
    t = t[ind]
    
    plt.close()
    plt.figure(figsize=(5,10))
    
    plt.plot(t5['g0'] - t5['i0'], t5['g0'], 'k.', ms=3, mew=0, alpha=0.5, zorder=0)
    plt.scatter(t['g0'] - t['i0'], t['g0'], c=t['rank'], s=15, vmin=1, vmax=5, cmap='Oranges_r')
    
    plt.xlabel('g - i')
    plt.ylabel('g')
    plt.xlim(-1,2)
    plt.ylim(20.5,13.5)
    plt.gca().set_aspect('equal')
    plt.savefig('../plots/diag_cmd_rank.png')

def cmd_vr():
    """Per-field color-magnitude diagrams color-coded by the velocity residual
    dvr; stars with |vr|>500 km/s are overplotted as grey points."""
    
    t = Table.read('../data/gd1_chelle.fits')
    # tb: targets with implausible velocities (background / failed fits)
    ind = (np.abs(t['vr'])>500) & (t['rank']<4)
    tb = t[ind]
    
    ind = (t['rank']<4) & (np.abs(t['dvr'])<20)
    t = t[ind]
    
    plt.close()
    fig, ax = plt.subplots(2,4,figsize=(10,8), sharex=True, sharey=True)
    
    # summary panel with all fields combined
    plt.sca(ax[1][2])
    plt.plot(tb['g0'] - tb['i0'], tb['g0'], 'ko', ms=2, mew=0, alpha=0.5)
    plt.scatter(t['g0'] - t['i0'], t['g0'], c=t['dvr'], s=20, vmin=-20, vmax=20, cmap='magma')
    
    plt.xlim(0,0.9)
    plt.ylim(20.5,16)
    plt.xlabel('g - i')
    
    # unused panel
    plt.sca(ax[1][3])
    plt.axis('off')
    
    # one CMD panel per field
    for e, f in enumerate(np.unique(t['field'])):
        irow = np.int64(e/4)
        icol = e%4
        plt.sca(ax[irow][icol])
        
        ind = t['field'] == f
        t_ = t[ind]
        plt.scatter(t_['g0'] - t_['i0'], t_['g0'], c=t_['dvr'], s=40, vmin=-20, vmax=20, cmap='magma')
        
        ind = tb['field'] == f
        tb_ = tb[ind]
        plt.plot(tb_['g0'] - tb_['i0'], tb_['g0'], 'ko', ms=4, mew=0, alpha=0.5)
        
        # annotate with the field's median (phi1, phi2)
        plt.text(0.05,0.9, '{:.1f}, {:.1f}'.format(np.median(t_['phi1']), np.median(t_['phi2'])), transform=plt.gca().transAxes, fontsize='small')
        
        if icol==0:
            plt.ylabel('g')
        if irow==1:
            plt.xlabel('g - i')
    
    plt.tight_layout(h_pad=0.1, w_pad=0.01)
    plt.savefig('../plots/fields_cmd_vr.png')

## misc

def vr_gradient():
    """Plot radial velocity along the stream in the fiducial, non-perturbed model"""
    
    # unperturbed (c0) and perturbed (c) model streams
    pkl0 = pickle.load(open('/home/ana/projects/gd1_spur/data/fiducial_noperturb_python3.pkl', 'rb'))
    c0 = pkl0['cg']
    
    pkl = pickle.load(open('/home/ana/projects/gd1_spur/data/fiducial_perturb_python3.pkl', 'rb'))
    c = pkl['cg']

    # 6th-order polynomial fit to the unperturbed vr(phi1) track
    # NOTE(review): `wangle` is not defined locally -- presumably a module-level
    # constant (180 deg); confirm.
    ind_fit = (c0.phi1.wrap_at(wangle)>-100*u.deg) & (c0.phi1.wrap_at(wangle)<20*u.deg)
    pfit = np.polyfit(c0.phi1.wrap_at(wangle).value[ind_fit], c0.radial_velocity.value[ind_fit], 6)
    poly = np.poly1d(pfit)
    
    plt.close()
    fig, ax = plt.subplots(2,1,figsize=(10,10), sharex=True)
    
    # top: absolute radial velocity of both models
    plt.sca(ax[0])
    plt.plot(c0.phi1.wrap_at(wangle), c0.radial_velocity, 'k.', ms=1)
    plt.plot(c.phi1.wrap_at(wangle), c.radial_velocity, 'r.', ms=1)

    plt.ylabel('$V_r$ [km s$^{-1}$]')
    plt.ylim(-500,500)
    
    # bottom: residuals from the polynomial track
    plt.sca(ax[1])
    plt.plot(c0.phi1.wrap_at(wangle), c0.radial_velocity.value - poly(c0.phi1.wrap_at(wangle).value), 'ko', ms=1)
    plt.plot(c.phi1.wrap_at(wangle), c.radial_velocity.value - poly(c.phi1.wrap_at(wangle).value), 'ro', ms=1)
    
    plt.xlabel('$\phi_1$ [deg]')
    plt.ylabel('$\Delta$ $V_r$ [km s$^{-1}$]')
    plt.xlim(-100,20)
    plt.ylim(-20,20)
    plt.tight_layout()
    
    plt.savefig('../plots/model_vr_gradient.png')

def h3():
    """Observed H3 stars in GD-1 coordinates"""
    
    t = Table.read('/home/ana/data/rcat_V1.4_MSG.fits')
    c = coord.ICRS(ra=t['RA']*u.deg, dec=t['DEC']*u.deg)
    cg = c.transform_to(gc.GD1)
    #print(t.colnames)
    
    tmem = Table.read('/home/ana/data/gd1-better-selection.fits')
    print(tmem.colnames)
    
    plt.close()
    plt.figure(figsize=(15,10))
    
    #plt.plot(cg.phi1.wrap_at(wangle), cg.phi2, '.', color='tab:blue', ms=1)
    #plt.plot(tmem['phi1'], tmem['phi2'], 'k.', ms=2.5, label='Observed GD-1')
    #plt.xlim(-80,0)
    #plt.ylim(-10,10)
    
    # equatorial view: H3 survey footprint + GD-1 members colored by phi1
    plt.plot(c.ra, c.dec, '.', color='tab:blue', ms=1)
    plt.plot(tmem['ra'], tmem['dec'], 'k.', ms=1, label='Observed GD-1', alpha=0.5)
    plt.scatter(tmem['ra'], tmem['dec'], c=tmem['phi1'], vmin=-100, vmax=20)
    
    plt.xlim(360,0)
    plt.gca().set_aspect('equal')
    plt.tight_layout()


## 2019 summary

def check_input_catalogs():
    """Sanity-check the local input photometric catalog against the xfitfibs
    target catalog by plotting a subsample of positions from each."""
    #t1 = Table.read('/home/ana/observing/Hectochelle/2019A/data/gd1_input_catalog.fits')
    t1 = Table.read('../data/gd1_input_catalog.fits')
    print(t1.colnames, len(t1))
    
    t2 = Table.read('/home/ana/observing/Hectochelle/2019A/xfitfibs/gd1_catalog.cat', format='ascii.fixed_width_two_line', delimiter='\t', fill_values='')
    targets = t2['rank']>0
    t2 = t2[targets]
    print(t2.colnames, len(t2))
    
    # xfitfibs stores ra in hours, dec in degrees; subsample every 100th star
    ra = coord.Angle(t2['ra'][::100], unit=u.hour).deg
    dec = coord.Angle(t2['dec'][::100], unit=u.degree).deg
    
    plt.close()
    fig, ax = plt.subplots(2,1,figsize=(10,10), sharex=True, sharey=True)
    
    plt.sca(ax[0])
    plt.plot(t1['ra'][::100], t1['dec'][::100], 'k.', rasterized=True)
    
    plt.sca(ax[1])
    plt.plot(ra, dec, 'k.', rasterized=True)
    
    plt.tight_layout()

def chelle_catalog():
    """Build the 2019 Hectochelle velocity catalog: join measured velocities
    with target ranks and dereddened photometry, add GD-1 coordinates and the
    velocity residual from the unperturbed-model track, and write
    ../data/gd1_chelle.fits."""
    wangle = 180*u.deg
    
    trank = Table.read('/home/ana/observing/Hectochelle/2019A/xfitfibs/gd1_catalog.cat', format='ascii.fixed_width_two_line', delimiter='\t', fill_values='')
    tphot = Table.read('../data/gd1_input_catalog.fits')

    # literature velocities (loaded but not included in the output table below)
    tlit = Table.read('/home/ana/projects/vision/data/koposov_vr.dat', format='ascii.commented_header')
    cg = gc.GD1(tlit['phi1']*u.deg, tlit['phi2']*u.deg)
    ceq = cg.transform_to(coord.ICRS)
    
    ra = ceq.ra
    dec = ceq.dec
    phi1 = cg.phi1.wrap_at(wangle)
    phi2 = cg.phi2
    vr = tlit['vr']*u.km/u.s
    vre = tlit['err']*u.km/u.s
    ref = np.zeros(len(tlit))
    
    tin = Table.read('../data/gd1_2019_all.tab', format='ascii.commented_header', delimiter='\t')
    rank = trank['rank'][tin['object']]
    xcr = 0
    keep = (tin['CZXCR']>xcr) #& (rank<5)
    
    # construct the catalog
    tin = tin[keep]
    
    # positions
    c = coord.ICRS(ra=tin['rad']*u.deg, dec=tin['decd']*u.deg)
    cg = c.transform_to(gc.GD1)
    
    # velocity differential
    # polynomial fit to the track
    pkl0 = pickle.load(open('/home/ana/projects/gd1_spur/data/fiducial_noperturb_python3.pkl', 'rb'))
    cmg0 = pkl0['cg']
    c0 = pkl0['cg']
    
    ind = (cmg0.phi1.wrap_at(wangle)<0*u.deg) & (cmg0.phi1.wrap_at(wangle)>-80*u.deg)
    prv = np.polyfit(cmg0.phi1.wrap_at(wangle)[ind], cmg0.radial_velocity[ind], 3)
    polyrv = np.poly1d(prv)
    
    drv = tin['VELOCITY'] - polyrv(cg.phi1.wrap_at(wangle))
    
    # photometry
    # extinction-corrected magnitudes, indexed by the original object id
    g = tphot['g'][tin['object']] - tphot['A_g'][tin['object']]
    r = tphot['r'][tin['object']] - tphot['A_r'][tin['object']]
    i = tphot['i'][tin['object']] - tphot['A_i'][tin['object']]
    
    tout = Table([tin['rad'], tin['decd'], cg.phi1.wrap_at(wangle), cg.phi2, tin['VELOCITY'], tin['CZXCERR'], drv, tin['field'], rank[keep], g, r, i], names=('ra', 'dec', 'phi1', 'phi2', 'vr', 'vre', 'dvr', 'field', 'rank', 'g0', 'r0', 'i0'))
    tout.pprint()
    tout.write('../data/gd1_chelle.fits', overwrite=True)

def chelle_map():
    """Plot GD-1 membership map and radial-velocity residuals with the 2019
    Hectochelle fields overlaid, colored by phi2; saves
    ../plots/vr_map_chelle.pdf and .png."""
    wangle = 180*u.deg
    cmodel = mpl.cm.Blues(0.9)
    cmodel = '0.5'  # overrides the colormap choice above with plain grey
    
    pkl = pickle.load(open('/home/ana/projects/gd1_spur/data/fiducial_perturb_python3.pkl', 'rb'))
    cmg = pkl['cg']
    c1 = pkl['cg']
    
    pkl0 = pickle.load(open('/home/ana/projects/gd1_spur/data/fiducial_noperturb_python3.pkl', 'rb'))
    cmg0 = pkl0['cg']
    c0 = pkl0['cg']
    
    # polynomial fit to the track
    ind = (cmg0.phi1.wrap_at(wangle)<0*u.deg) & (cmg0.phi1.wrap_at(wangle)>-80*u.deg)
    prv = np.polyfit(cmg0.phi1.wrap_at(wangle)[ind], cmg0.radial_velocity[ind], 3)
    polyrv = np.poly1d(prv)
    
    pmu1 = np.polyfit(c0.phi1.wrap_at(180*u.deg)[ind], c0.pm_phi1_cosphi2[ind].to(u.mas/u.yr), 3)
    polymu1 = np.poly1d(pmu1)
    
    pmu2 = np.polyfit(c0.phi1.wrap_at(180*u.deg)[ind], c0.pm_phi2[ind].to(u.mas/u.yr), 3)
    polymu2 = np.poly1d(pmu2)
    
    # members
    #tmem = Table.read('/home/ana/data/gd1-better-selection.fits')
    #tmem = Table.read('/home/ana/projects/gd1_spur/data/members.fits')
    g = Table(fits.getdata('/home/ana/projects/GD1-DR2/output/gd1_members.fits'))
    
    # rv observations
    #trv = Table.read('/home/ana/projects/vision/data/gd1_vr.fits')
    trv = Table.read('../data/gd1_chelle.fits')
    #print(np.array(trv['phi2']))
    
    plt.close()
    fig, ax = plt.subplots(2,1,figsize=(10,5.5), sharex=True)
    
    # top panel: membership-probability map of the stream
    plt.sca(ax[0])
    #plt.plot(cmg.phi1.wrap_at(wangle), cmg.phi2, '.', color=cmodel, ms=2)
    #plt.plot(tmem['phi1'], tmem['phi2'], 'k.', ms=2.5, label='Observed GD-1')
    plt.scatter(g['phi1'], g['phi2'], s=g['pmem']*2, c=g['pmem'], cmap=mpl.cm.binary, vmin=0.5, vmax=1.1, label='Observed GD-1')

    #plt.scatter(tmem['phi1'], tmem['phi2'], s=tmem['stream_prob']*2, c=tmem['stream_prob'], cmap=mpl.cm.binary, vmin=0.5, vmax=1.1, rasterized=True)
    #plt.plot(trv['phi1'], trv['phi2'], 'wo', mec='k')
    
    #plt.xlim(-50, -20)
    #plt.xlim(-40, -25)
    plt.xlim(-60, -20)
    plt.ylim(-6,6)
    plt.ylabel('$\phi_2$ [deg]')
    
    
    # bottom panel: vr residuals relative to the model track
    plt.sca(ax[1])
    plt.plot(cmg.phi1.wrap_at(wangle), cmg.radial_velocity - polyrv(cmg.phi1.wrap_at(wangle))*u.km/u.s, '.', color=cmodel, ms=2, label='GD-1 model')
    #plt.plot(trv['phi1'], trv['vr'] - polyrv(trv['phi1']), 'wo', mec='k')
    #plt.errorbar(trv['phi1'], trv['vr'] - polyrv(trv['phi1']), yerr=trv['vre'], fmt='none', color='k', mec='k')
    
    plt.ylim(-20,20)
    plt.xlabel('$\phi_1$ [deg]')
    plt.ylabel('$\Delta$ $V_r$ [km s$^{-1}$]')
    
    colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red']
    colors = ['steelblue', mpl.cm.Oranges(0.4), mpl.cm.Oranges(0.8), mpl.cm.Oranges(0.6)]
    msize = [5, 7, 7, 7]
    markers = ['o', 'D', 'D', 'D']
    labels = ['Koposov et al. (2010)', '', '', 'Hectochelle 2019A']
    
    # one pass per observed field; good targets only (rank<4)
    for e,i in enumerate(np.unique(trv['field'])[:]):
        ind = (trv['field']==i) & (trv['rank']<4)
        #color = colors[e]
        #ms = msize[e]
        #print(e)
        color = mpl.cm.magma(e/7)
        ms = 6
        marker = 'o'
        #marker = markers[e]
        
        plt.sca(ax[0])
        #plt.plot(trv['phi1'][ind], trv['phi2'][ind], marker, ms=ms, color=color, label='')
        # only label the first field so the legend gets a single entry
        if e==0:
            plt.scatter(trv['phi1'][ind], trv['phi2'][ind], c=trv['phi2'][ind], vmin=-0.5, vmax=1.5, cmap='viridis', label='Spectroscopic targets', s=50)
        else:
            plt.scatter(trv['phi1'][ind], trv['phi2'][ind], c=trv['phi2'][ind], vmin=-0.5, vmax=1.5, cmap='viridis', label='', s=50)
        
        plt.sca(ax[1])
        #plt.plot(trv['phi1'][ind], trv['vr'][ind] - polyrv(trv['phi1'][ind]), marker, ms=ms, color=color, label='')
        #plt.errorbar(trv['phi1'][ind], trv['vr'][ind] - polyrv(trv['phi1'][ind]), yerr=trv['vre'][ind], fmt='none', color=color, label='')
        
        plt.errorbar(trv['phi1'][ind], trv['vr'][ind] - polyrv(trv['phi1'][ind]), yerr=trv['vre'][ind], fmt='none', color='k', label='', zorder=0)
        plt.scatter(trv['phi1'][ind], trv['vr'][ind] - polyrv(trv['phi1'][ind]), c=trv['phi2'][ind], vmin=-0.5, vmax=1.5, cmap='viridis', label='', s=50)
    
    
    plt.sca(ax[0])
    plt.legend(loc=2, fontsize='small', handlelength=0.4)
    
    plt.sca(ax[1])
    plt.legend(loc=2, fontsize='small', handlelength=0.4)
    
    plt.tight_layout(h_pad=0)
    #plt.savefig('/home/ana/proposals/CfA2019B/hecto_gd1spur/plots/vr_map.pdf')
    plt.savefig('../plots/vr_map_chelle.pdf')
    plt.savefig('../plots/vr_map_chelle.png', dpi=200)
    
    
# The Payne & Minesweeper
# reduction (continuation of the section header above)

def get_date(n):
    """Get date when the field was observed (assumes no repeats)"""
    
    # the per-field tile directory contains a single d* date subdirectory
    return glob.glob('/home/ana/data/hectochelle/tiles/gd1_{:d}/d*'.format(n))[0].split('/')[-1]

def read_msg_catalog():
    """Trim the MSG catalog to the columns present in the Payne catalog and
    write the result to ../data/msg_catalog.fits."""
    tp = Table.read('../data/payne_catalog.fits')
    keys = tp.colnames
    
    t = Table.read('../data/GD1_MSG_V1.3.fits')
    t.keep_columns(keys)
    t.pprint()
    t.write('../data/msg_catalog.fits', overwrite=True)

def build_payne_catalog():
    """Concatenate the per-star Payne parameter files from all tile
    directories into ../data/payne_catalog.fits."""
    
    tile_dirs = glob.glob('../data/tiles/gd1*')
    par_list = glob.glob('{}/results/V1.2/pars/*pars'.format(tile_dirs[0]))
    
    # use the first file as a schema template for an empty output table
    t = Table.read(par_list[0], format='ascii')
    tout = t[:0].copy()
    
    for tdir in tile_dirs[:]:
        print('entering {}'.format(tdir))
        par_list = glob.glob('{}/results/V1.2/pars/*pars'.format(tdir))
        
        for pfile in par_list[:]:
            t = Table.read(pfile, format='ascii')
            tout.add_row(t[0])
    
    tout.pprint()
    tout.write('../data/payne_catalog.fits', overwrite=True)

def build_master_catalog():
    """Join the MSG spectroscopic catalog with the input photometry, attach
    field numbers decoded from star names, compute GD-1 coordinates and
    solar-reflex-corrected proper motions, the radial-velocity residual
    delta_Vrad, and per-field focal-plane positions; writes
    ../data/master_catalog.fits."""
    
    t = Table.read('../data/msg_catalog.fits')
    #t = Table.read('../data/gd1_MSG_v2.0.fits')
    #t.remove_columns(['ra', 'dec'])
    N = len(t)
    starid = np.zeros(N, dtype='int')
    
    tile_dirs = glob.glob('../data/tiles/gd1*')
    field = np.zeros(N, dtype='int')
    
    #for tl in tile_dirs:
    tile_dates = [glob.glob('{:s}/d*'.format(tl))[0].split('/')[-1] for tl in tile_dirs]
    fields = [int(tl.split('_')[-1]) for tl in tile_dirs]

    # decode star id and observing date from 'starname' to map rows to fields
    for i in range(N):
        name_elements = t['starname'][i].split('_')
        starid[i] = int(name_elements[0][3:])
        
        ind = tile_dates.index(name_elements[-1])
        #print(tile_dates, name_elements[-1], ind, fields[ind])
        #print(fields[ind])
        field[i] = fields[ind]
        
        #tile = [s for s in tile_dirs if name_elements[-1] in s][0]
        #tile_elements = tile.split('_')
        #field[i] = int(tile_elements[1])
    
    tphot = Table.read('../data/gd1_input_catalog.fits')
    tphot = tphot[starid]
    tphot['field'] = field
    
    tout = astropy.table.hstack([tphot, t])
    # fields 5 and 7 share an observing date; disambiguate them by RA
    tout['field'][(tout['field']==7) & (tout['ra']>150)] = 5
    tout['field'][(tout['field']==7) & (tout['ra']<150)] = 7
    #tout['field'][tout['ra']<150] = 7
    
    
    # positions
    #c = coord.ICRS(ra=tout['ra']*u.deg, dec=tout['dec']*u.deg)
    #cg = c.transform_to(gc.GD1)
    
    ## heliocentric vr correction
    #for f in fields:
        #fname = '/home/ana/data/hectochelle/tiles/gd1_{0:d}/{1:s}/reduced/v3.0/specptg_gd1_{0:d}_cluster_{1:s}.sum.fits'.format(f, get_date(f))
        #hdu = fits.open(fname)
        #dvr = hdu[0].header['HELIO_RV']
        #ind = field==f
        ##tout['Vrad'][ind] += dvr
    
    c = coord.SkyCoord(ra=tout['ra']*u.deg, dec=tout['dec']*u.deg, pm_ra_cosdec=tout['pmra']*u.mas/u.yr, pm_dec=tout['pmdec']*u.mas/u.yr, radial_velocity=tout['Vrad']*u.km/u.s)
    #c = coord.SkyCoord(ra=g.ra, dec=g.dec, pm_ra_cosdec=g.pmra, pm_dec=g.pmdec)
    cg_nocorr = c.transform_to(gc.GD1)
    
    # attach the linear distance model so the reflex correction can be applied
    gd1_c_dist = gc.GD1(phi1=cg_nocorr.phi1, phi2=cg_nocorr.phi2,
                        #distance=gd1_dist(cg.phi1),
                        distance=gd1_dist(cg_nocorr.phi1),
                        pm_phi1_cosphi2=cg_nocorr.pm_phi1_cosphi2,
                        pm_phi2=cg_nocorr.pm_phi2,
                        radial_velocity=cg_nocorr.radial_velocity)

    # Correct for reflex motion
    v_sun = coord.Galactocentric().galcen_v_sun
    observed = gd1_c_dist.transform_to(coord.Galactic)
    rep = observed.cartesian.without_differentials()
    rep = rep.with_differentials(observed.cartesian.differentials['s'] + v_sun)
    # gd1_c_nosunv = coord.Galactic(rep).transform_to(gc.GD1)
    cg = coord.Galactic(rep).transform_to(gc.GD1)
    
    # velocity differential
    # polynomial fit to the track
    pkl0 = pickle.load(open('/home/ana/projects/gd1_spur/data/fiducial_noperturb_python3.pkl', 'rb'))
    cmg0 = pkl0['cg']
    #c0 = pkl0['cg']
    
    ind = (cmg0.phi1.wrap_at(wangle)<0*u.deg) & (cmg0.phi1.wrap_at(wangle)>-80*u.deg)
    prv = np.polyfit(cmg0.phi1.wrap_at(wangle)[ind].deg, cmg0.radial_velocity[ind].value, 3)
    polyrv = np.poly1d(prv)
    drv = tout['Vrad'] - polyrv(cg.phi1.wrap_at(wangle).deg)
    
    #ind = (tout['field']==1) | (tout['field']==3) | (tout['field']==7) | (tout['field']==8)
    #mem = (tout['priority']<=3) & (-tout['lnL'] < 2.5E3+tout['SNR']**2.4) & (tout['SNR']>2) & (drv>-20) & (drv<-1)
    #prv = np.polyfit(cg.phi1.wrap_at(wangle)[ind & mem], cg.radial_velocity[ind & mem], 3)
    
    # refit the track linearly on likely stream members, then recompute drv
    stream = (tout['field']==1) | (tout['field']==3) | (tout['field']==7) | (tout['field']==8)
    mem = (drv>-20) & (drv<-1) & (t['FeH']<-2) & (-tout['lnL'] < 2.5E3+tout['SNR']**2.4) & (tout['SNR']>2)
    prv_ = np.polyfit(cg.phi1.wrap_at(wangle)[stream & mem], tout['Vrad'][stream & mem], 1)
    
    polyrv_ = np.poly1d(prv_)
    drv = tout['Vrad'] - polyrv_(cg.phi1.wrap_at(wangle).deg)
    
    x_ = np.linspace(-47,-29,50)
    y_ = polyrv_(x_)
    
    # diagnostic plot of the member velocities and the linear fit
    plt.close()
    plt.plot(cg.phi1.wrap_at(wangle)[mem], tout['Vrad'][mem], 'ko')
    plt.plot(cg.phi1.wrap_at(wangle)[mem], drv[mem], 'ro')
    plt.plot(x_, y_, '-')
    
    tout['phi1'] = cg.phi1.wrap_at(wangle)
    tout['phi2'] = cg.phi2
    tout['delta_Vrad'] = drv
    tout['pm_phi1_cosphi2'] = cg.pm_phi1_cosphi2.to(u.mas/u.yr)
    tout['pm_phi2'] = cg.pm_phi2.to(u.mas/u.yr)
    tout['pm_phi1_cosphi2_nocorr'] = cg_nocorr.pm_phi1_cosphi2.to(u.mas/u.yr)
    tout['pm_phi2_nocorr'] = cg_nocorr.pm_phi2.to(u.mas/u.yr)
    
    dates = [get_date(n_) for n_ in fields]
    
    tout['delta_Vrad_field'] = np.zeros_like(drv)
    tout['xfocal'] = np.zeros_like(drv)
    tout['yfocal'] = np.zeros_like(drv)
    
    # per field properties
    for e, f in enumerate(fields):
        # select field
        ind_field = tout['field'] == f
        t_ = t[ind_field]
        
        # relative delta vrad
        tout['delta_Vrad_field'][ind_field] = drv[ind_field] - np.median(drv[ind_field])
        
        # focal plane
        fname = '/home/ana/data/hectochelle/tiles/gd1_{0:d}/{1:s}/reduced/v3.0/specptg_gd1_{0:d}_cluster_{1:s}.sum.fits'.format(f, dates[e])
        hdu = fits.open(fname)
        # fibID is 1-based; focal-plane coordinates live in HDU 5
        tout['xfocal'][ind_field] = hdu[5].data['XFOCAL'][t_['fibID']-1]
        tout['yfocal'][ind_field] = hdu[5].data['YFOCAL'][t_['fibID']-1]
    
    tout.pprint()
    print(tout.colnames)
    tout.write('../data/master_catalog.fits', overwrite=True)

def test_master():
    """Print row counts of the current and archived master catalog versions."""
    
    t = Table.read('../data/master_catalog.fits')
    t1 = Table.read('../data/master_catalog_v1.fits')
    t2 = Table.read('../data/master_catalog_v2.fits')
    
    print(len(t), len(t1), len(t2))


def gd1_dist(phi1):
    """Linear distance model for GD-1: 10 kpc at phi1=0, sloping by
    (10-7)/60 kpc per degree of phi1 (wrapped at 180 deg)."""
    m = (10-7) / (60)
    return (m*phi1.wrap_at(180*u.deg).value + 10) * u.kpc


def ra_vr():
    """Plot Vrad against RA for each field of high-priority targets."""
    
    t = Table.read('../data/master_catalog.fits')
    ind = t['priority']<4
    t = t[ind]
    print(t.colnames)
    
    plt.close()
    for f in np.unique(t['field']):
        t_ = t[t['field']==f]
        plt.plot(t_['ra'], t_['Vrad'], 'o')
    
    #plt.ylim(-120,-20)
    
    plt.tight_layout()
    
def phi1_vr():
    """Plot per-field positions (top) and Vrad residuals from a linear
    member-based fit (bottom), with the perturbed model overlaid; saves
    ../plots/phi1_vr.png."""
    tall = Table.read('/home/ana/data/gd1-better-selection.fits')
    t = Table.read('../data/master_catalog.fits')
    ind = t['priority']<4
    t = t[ind]
    
    # fit polynomial
    # linear vr(phi1) fit on likely members in the stream fields
    stream = (t['field']==1) | (t['field']==3) | (t['field']==7) | (t['field']==8)
    mem = (t['delta_Vrad']>-20) & (t['delta_Vrad']<-1) & (t['FeH']<-2) & (-t['lnL'] < 2.5E3+t['SNR']**2.4) & (t['SNR']>2)
    prv = np.polyfit(t['phi1'][stream & mem], t['Vrad'][stream & mem], 1)
    
    polyrv = np.poly1d(prv)
    phi1_ = np.linspace(-47,-29,50)
    vr_ = polyrv(phi1_)
    
    pkl0 = pickle.load(open('/home/ana/projects/gd1_spur/data/fiducial_noperturb_python3.pkl', 'rb'))
    cmg0 = pkl0['cg']
    pkl = pickle.load(open('/home/ana/projects/gd1_spur/data/fiducial_perturb_python3.pkl', 'rb'))
    cmg = pkl['cg']
    
    plt.close()
    fig, ax = plt.subplots(2,1,figsize=(9,9), sharex=True, gridspec_kw={'height_ratios': (1,2)})
    
    for e, f in enumerate(np.unique(t['field'])):
        t_ = t[t['field']==f]
        #print(f, np.median(t_['phi1']))
        #ind = (t_['delta_Vrad']>-20) & (t_['FeH']<-2) & (-t_['lnL'] < 2.5E3+t_['SNR']**2.4) & (t_['SNR']>2) # & (t_['aFe']<0.3)
        ind = (t_['delta_Vrad']>-20) & (t_['delta_Vrad']<20) & (-t_['lnL'] < 2.5E3+t_['SNR']**2.4) & (t_['SNR']>2) # & (t_['aFe']<0.3)
        print(e, np.sum(ind))

        # faint points: all stars in the field; solid: those passing the cut
        plt.sca(ax[0])
        plt.plot(t_['phi1'], t_['phi2'], 'o', alpha=0.2, color='C{:1d}'.format(e))
        plt.plot(t_['phi1'][ind], t_['phi2'][ind], 'o', color='C{:1d}'.format(e))
        
        plt.sca(ax[1])
        plt.plot(t_['phi1'], t_['Vrad'] - polyrv(t_['phi1']), 'o', alpha=0.2, color='C{:1d}'.format(e))
        plt.plot(t_['phi1'][ind], t_['Vrad'][ind] - polyrv(t_['phi1'][ind]), 'o', color='C{:1d}'.format(e))
        plt.scatter(t_['phi1'][ind], t_['Vrad'][ind] - polyrv(t_['phi1'][ind]), c=t_['phi2'][ind], s=20, vmin=-1, vmax=2)
    
    plt.sca(ax[0])
    plt.plot(tall['phi1'], tall['phi2'], 'k.', ms=2.5, label='Observed GD-1', zorder=0)
    plt.scatter(cmg.phi1.wrap_at(wangle), cmg.phi2, c=cmg.phi2.value, vmin=-1, vmax=2, s=10)
    #plt.plot(cmg0.phi1.wrap_at(wangle), cmg0.phi2, 'b.', ms=1)
    plt.ylim(-3,3)
    plt.xlim(-48,-20)
    plt.ylabel('$\phi_2$ [deg]')
    
    plt.sca(ax[1])
    #plt.plot(t['phi1'][stream & mem], t['Vrad'][stream & mem], 'ko')
    #plt.plot(phi1_, vr_, '-')
    #plt.plot(cmg0.phi1.wrap_at(wangle), cmg0.radial_velocity-10*u.km/u.s - polyrv(cmg0.phi1.wrap_at(wangle).deg)*u.km/u.s, 'b.', ms=1)
    #plt.plot(cmg.phi1.wrap_at(wangle), cmg.radial_velocity-11*u.km/u.s - polyrv(cmg.phi1.wrap_at(wangle).deg)*u.km/u.s, 'r.', ms=1)
    # model residuals, offset by 11 km/s to match the data -- TODO confirm offset
    plt.scatter(cmg.phi1.wrap_at(wangle), cmg.radial_velocity-11*u.km/u.s - polyrv(cmg.phi1.wrap_at(wangle).deg)*u.km/u.s, c=cmg.phi2.value, vmin=-1, vmax=2, s=10)
    plt.ylim(-40,40)
    plt.ylabel('$V_r$ [km s$^{-1}$]')
    plt.xlabel('$\phi_1$ [deg]')
    
    plt.tight_layout(h_pad=0)
    plt.savefig('../plots/phi1_vr.png', dpi=150)

def vr_perfield(color=''):
    """Plot median-subtracted Vrad vs. phi1 for each field in a grid of
    panels, optionally colored by 'phi2', 'afe', 'feh' or 'teff'; saves
    ../plots/phi1_vr_perfield_<color>.png."""
    t = Table.read('../data/master_catalog.fits')
    # likely members: priority, velocity window, metallicity and quality cuts
    ind = (t['priority']<=3) & (t['delta_Vrad']<-1) & (t['delta_Vrad']>-20) & (t['FeH']<-2) & (-t['lnL'] < 2.5E3+t['SNR']**2.4) & (t['SNR']>2)
    t = t[ind]
    #print(t.colnames)
    
    print(np.min(t['Teff']), np.max(t['Teff']))
    
    # grid dimensions: roughly square layout for Nf fields
    fields = np.unique(t['field'])
    Nf = np.size(fields)
    nrow = int(np.ceil(np.sqrt(Nf)))
    ncol = int(np.ceil(Nf/nrow))
    
    da = 2.5
    plt.close()
    fig, ax = plt.subplots(nrow, ncol, figsize=(ncol*da, nrow*da), sharex=True, sharey=True)
    for i in range(Nf):
        t_ = t[t['field']==fields[i]]

        irow = i%ncol
        icol = int(i/ncol)
        # each field is drawn in its own panel and in the shared last panel
        axes = [ax[irow][icol], ax[nrow-1][ncol-1]]

        for ax_ in axes:
            plt.sca(ax_)
            
            # coloring mode selects what the scatter points encode
            if len(color)==0:
                plt.plot(t_['phi1'] - np.median(t_['phi1']), t_['Vrad'] - np.median(t_['Vrad']), 'o', color='C{:1d}'.format(i))
                plt.errorbar(t_['phi1'] - np.median(t_['phi1']), t_['Vrad'] - np.median(t_['Vrad']), yerr=(t_['lerr_Vrad'], t_['uerr_Vrad']), fmt='none', color='C{:1d}'.format(i))
            elif color=='phi2':
                plt.scatter(t_['phi1'] - np.median(t_['phi1']), t_['Vrad'] - np.median(t_['Vrad']), c=t_['phi2'] - np.median(t_['phi2']), vmin=-0.3, vmax=0.3, cmap='RdBu', zorder=1)
                plt.errorbar(t_['phi1'] - np.median(t_['phi1']), t_['Vrad'] - np.median(t_['Vrad']), yerr=(t_['lerr_Vrad'], t_['uerr_Vrad']), fmt='none', color='0.5', zorder=0)
            elif color=='afe':
                plt.scatter(t_['phi1'] - np.median(t_['phi1']), t_['Vrad'] - np.median(t_['Vrad']), c=t_['aFe'], vmin=-0.2, vmax=0.6, cmap='magma', zorder=1)
                plt.errorbar(t_['phi1'] - np.median(t_['phi1']), t_['Vrad'] - np.median(t_['Vrad']), yerr=(t_['lerr_Vrad'], t_['uerr_Vrad']), fmt='none', color='0.5', zorder=0)
            elif color=='feh':
                plt.scatter(t_['phi1'] - np.median(t_['phi1']), t_['Vrad'] - np.median(t_['Vrad']), c=t_['FeH'], vmin=-3, vmax=-2, cmap='magma', zorder=1)
                plt.errorbar(t_['phi1'] - np.median(t_['phi1']), t_['Vrad'] - np.median(t_['Vrad']), yerr=(t_['lerr_Vrad'], t_['uerr_Vrad']), fmt='none', color='0.5', zorder=0)
            elif color=='teff':
                plt.scatter(t_['phi1'] - np.median(t_['phi1']), t_['Vrad'] - np.median(t_['Vrad']), c=t_['Teff'], cmap='magma', vmin=5200, vmax=6900, zorder=1)
                plt.errorbar(t_['phi1'] - np.median(t_['phi1']), t_['Vrad'] - np.median(t_['Vrad']), yerr=(t_['lerr_Vrad'], t_['uerr_Vrad']), fmt='none', color='0.5', zorder=0)
            
            # NOTE(review): indentation of these limits was lost in this copy;
            # axes are shared so the effect is the same either way -- confirm.
            plt.xlim(-0.6, 0.6)
            plt.ylim(-7, 7)
    
    for i in range(ncol):
        plt.sca(ax[nrow-1][i])
        plt.xlabel('$\Delta$ $\phi_1$ [deg]')
    
    for i in range(nrow):
        plt.sca(ax[i][0])
        plt.ylabel('$\Delta$ $V_{rad}$ [km s$^{-1}$]')
    
    plt.tight_layout(h_pad=0.4, w_pad=0.1)
    plt.savefig('../plots/phi1_vr_perfield_{:s}.png'.format(color), dpi=150)
    

def phi1_dvr():
    """Plot delta_Vrad vs. phi1 for member stars, per field and colored by
    initial [Fe/H] and [alpha/Fe]."""
    
    tall = Table.read('/home/ana/data/gd1-better-selection.fits')

    t = Table.read('../data/master_catalog.fits')
    #ind = (t['priority']<4) #& (-t['lnL'] < 2.5E3+t['SNR']**2.4) & (t['SNR']>2)
    ind = (t['priority']<=3) & (-t['lnL'] < 2.5E3+t['SNR']**2.4) & (t['SNR']>3) #& (t['delta_Vrad']<-1) & (t['delta_Vrad']>-20) & (t['FeH']<-2)
    
    # NOTE(review): get_members is defined elsewhere in this file/module;
    # it supersedes the manual cut computed just above.
    ind = get_members(t)
    t = t[ind]
    
    plt.close()
    fig, ax = plt.subplots(4,1,figsize=(10,10), sharex=True, sharey=False)
    
    for f in np.unique(t['field']):
        t_ = t[t['field']==f]

        plt.sca(ax[0])
        plt.plot(t_['phi1'], t_['phi2'], 'o', label='{}'.format(f))
        
        plt.sca(ax[1])
        plt.plot(t_['phi1'], t_['delta_Vrad'], 'o')
        plt.errorbar(t_['phi1'], t_['delta_Vrad'], yerr=(t_['lerr_Vrad'], t_['uerr_Vrad']), fmt='none', color='0.2', zorder=0)
    
    plt.sca(ax[0])
    plt.plot(tall['phi1'], tall['phi2'], 'k.', ms=2.5, label='', zorder=0)
    plt.ylim(-3,3)
    plt.ylabel('$\phi_2$ [deg]')
    #plt.legend(loc=2, bbox_to_anchor=(1,1), handlelength=0.5)
    
    plt.sca(ax[1])
    plt.ylim(-10,10)
    plt.xlim(-48,-28)
    plt.ylabel('$\Delta V_r$ [km s$^{-1}$]')
    
    plt.sca(ax[2])
    im2 = plt.scatter(t['phi1'], t['delta_Vrad'], c=t['init_FeH'], vmin=-3, vmax=-1.5)
    plt.ylim(-10,10)
    plt.ylabel('$\Delta V_r$ [km s$^{-1}$]')
    
    plt.sca(ax[3])
    im3 = plt.scatter(t['phi1'], t['delta_Vrad'], c=t['init_aFe'], vmin=-0.2, vmax=0.6)
    plt.ylim(-10,10)
    plt.ylabel('$\Delta V_r$ [km s$^{-1}$]')
    plt.xlabel('$\phi_1$ [deg]')
    
    plt.tight_layout(h_pad=0)
    
    # attach colorbars to the right of panels 2 and 3
    plt.sca(ax[2])
    [[x00,y10],[x11,y01]] = plt.gca().get_position().get_points()
    pad = 0.01; width = 0.02
    cbar_ax = fig.add_axes([x11+pad, y10, width, y01-y10])
    plt.colorbar(im2, cax=cbar_ax)
    plt.ylabel('[Fe/H]')
    
    plt.sca(ax[3])
    [[x00,y10],[x11,y01]] = plt.gca().get_position().get_points()
    pad = 0.01; width = 0.02
    cbar_ax = fig.add_axes([x11+pad, y10, width, y01-y10])
    plt.colorbar(im3, cax=cbar_ax)
    plt.ylabel('[$\\alpha$/Fe]')
    
    #plt.savefig('../plots/phi1_dvr.png')

def phi2_dvr():
    """Plot delta_Vrad against phi2, one series per field."""
    t = Table.read('../data/master_catalog.fits')
    ind = (t['priority']<4) #& (-t['lnL'] < 2.5E3+t['SNR']**2.4) & (t['SNR']>2)
    t = t[ind]
    
    plt.close()
    for f in np.unique(t['field']):
        t_ = t[t['field']==f]
        plt.plot(t_['phi2'], t_['delta_Vrad'], 'o')
    plt.ylim(-20,20)
    
    plt.tight_layout()

def phi1_dvr_phi2():
    """Plot delta vr vs phi1, colored by phi2"""
    t = Table.read('../data/master_catalog.fits')
    #ind = (t['priority']<=3) & (t['delta_Vrad']<-1) & (t['delta_Vrad']>-20) & (t['FeH']<-2) & (-t['lnL'] < 2.5E3+t['SNR']**2.4) & (t['SNR']>2)
    ind = (t['priority']<=3) & (-t['lnL'] < 2.5E3+t['SNR']**2.4) & (t['SNR']>2) #& (t['delta_Vrad']<-1) & (t['delta_Vrad']>-20) & (t['FeH']<-2)
    t = t[ind]
    
    plt.close()
    plt.figure(figsize=(10,10))
    
    plt.scatter(t['phi1'], t['delta_Vrad'], c=t['phi2'], vmin=-0.5, vmax=1.5, s=40)
    plt.ylim(-10,10)
    plt.xlabel('$\phi_1$ [deg]')
    plt.ylabel('$V_r$ [km s$^{-1}$]')
    
    plt.tight_layout()

    

def afeh():
    """[Fe/H] vs. [alpha/Fe] of all targets vs. kinematically selected
    members, with marginal histograms."""
    t = Table.read('../data/master_catalog.fits')
    ind = (t['priority']<4) & (np.abs(t['delta_Vrad'])<20)
    tmem = t[ind]
    
    bins_feh = np.linspace(-3,0,40)
    bins_afe = np.linspace(-0.2,0.6,15)
    
    plt.close()
    # 2x2 layout: main panel + marginal histograms on top and right
    fig, ax = plt.subplots(2,2,figsize=(11,5), sharex='col', sharey='row', gridspec_kw = {'width_ratios':[5, 1], 'height_ratios':[1,4], 'hspace':0, 'wspace':0})
    
    plt.sca(ax[0][1])
    plt.axis('off')
    
    plt.sca(ax[1][0])
    plt.plot(t['FeH'], t['aFe'], 'ko', ms=3, zorder=0, label='All targeted')
    plt.plot(tmem['init_FeH'], tmem['init_aFe'], 'ro', label='PM, CMD, |$\Delta V_r$|<20 km s$^{-1}$')
    plt.errorbar(tmem['init_FeH'], tmem['init_aFe'], xerr=(tmem['lerr_FeH'], tmem['uerr_FeH']), yerr=(tmem['lerr_aFe'], tmem['uerr_aFe']), fmt='none', color='r', lw=0.5, label='')
    #plt.errorbar(tmem['FeH'], tmem['aFe'], xerr=tmem['std_FeH'], yerr=tmem['std_aFe'], fmt='none', color='r', lw=0.5, label='')
    
    plt.legend(loc=4, fontsize='small')
    plt.xlabel('[Fe/H]')
    plt.ylabel('[$\\alpha$/Fe]')
    
    plt.xlim(-3,0)
    plt.ylim(-0.2, 0.6)
    #plt.gca().set_aspect('equal')
    
    # marginal [Fe/H] histogram (top)
    plt.sca(ax[0][0])
    plt.hist(t['FeH'], bins=bins_feh, color='k', alpha=0.3, density=True)
    plt.hist(tmem['init_FeH'], bins=bins_feh, color='r', alpha=0.3, density=True)
    plt.axis('off')
    
    # marginal [alpha/Fe] histogram (right)
    plt.sca(ax[1][1])
    plt.hist(t['aFe'], bins=bins_afe, color='k', alpha=0.3, density=True, orientation='horizontal')
    plt.hist(tmem['init_aFe'], bins=bins_afe, color='r', alpha=0.3, density=True, orientation='horizontal')
    plt.axis('off')
    
    plt.tight_layout(h_pad=0, w_pad=0)
    #plt.savefig('../plots/afeh.png')

def dvr_feh():
    """Plot delta_Vrad against [Fe/H] and [alpha/Fe] for high-priority targets."""
    t = Table.read('../data/master_catalog.fits')
    ind = t['priority']<4
    t = t[ind]
    
    plt.close()
    fig, ax = plt.subplots(1,2,figsize=(10,5), sharey=True)
    
    plt.sca(ax[0])
    plt.plot(t['FeH'], t['delta_Vrad'], 'ko')
    plt.ylim(-20,20)
    
    plt.sca(ax[1])
    plt.plot(t['aFe'], t['delta_Vrad'], 'ko')
    
    
    plt.tight_layout()


# velocity statistics

def brute_membership():
    """A very crude determination of the stream members"""
    
    t = Table.read('../data/master_catalog.fits')
    # members: high priority and delta_Vrad within (-20, -1) km/s
    ind = (t['priority']<=3) & (t['delta_Vrad']>-20) & (t['delta_Vrad']<-1)
    t = t[ind]
    t.write('../data/members_catalog.fits', overwrite=True)
    

def vr_profile(deg=1):
    """Fit and plot polynomial Vrad(phi1) profiles of the stream and the spur
    separately (degree `deg`), pickling the coefficients to
    ../data/polyfit_rv_<deg>.pkl; saves ../plots/vr_profile.png."""
    
    t = Table.read('../data/members_catalog.fits')
    
    # spur fields: 2, 4, 5, 6; the remaining fields trace the main stream
    spur = (t['field']==2) | (t['field']==4) | (t['field']==6) | (t['field']==5)
    stream = ~spur
    stream2 = (t['field']==1) | (t['field']==3)
    labels = ['Stream', 'Spur']
    p = []
    
    plt.close()
    fig, ax = plt.subplots(1,2,figsize=(10,5), sharex=True, sharey=True)
    
    for e, ind in enumerate([stream, spur]):
        # polyfit
        # inverse-error weighting
        p += [np.polyfit(t['phi1'][ind], t['Vrad'][ind], deg, w=t['std_Vrad'][ind]**-1)]
        poly = np.poly1d(p[e])
        x = np.linspace(-48, -29.2, 100)
        y = poly(x)
        
        # plot
        plt.sca(ax[e])
        plt.plot(t['phi1'][ind], t['Vrad'][ind], 'o', label='')
        plt.errorbar(t['phi1'][ind], t['Vrad'][ind], yerr=(t['lerr_Vrad'][ind], t['uerr_Vrad'][ind]), fmt='none', color='tab:blue', zorder=0, lw=1.5, label='')
        plt.plot(x, y, '-', lw=0.75, zorder=0, color='navy', label='$V_r$ = {:.1f}$\\times\phi_1$ {:+.0f}'.format(p[e][0], p[e][1]))
        plt.title(labels[e], fontsize='medium')
        
        # overplot this fit as a dashed line on the other panel for comparison
        plt.sca(ax[(e+1)%2])
        plt.plot(x, y, '--', lw=0.75, zorder=0, color='navy', label='')
        
        plt.xlabel('$\phi_1$ [deg]')
    
    outdict = {'p_stream': p[0], 'p_spur': p[1]}
    pkl = pickle.dump(outdict, open('../data/polyfit_rv_{:d}.pkl'.format(deg), 'wb'))
    
    plt.sca(ax[0])
    plt.ylabel('$V_r$ [km s$^{-1}$]')
    plt.legend(frameon=False, loc=1)
    
    plt.sca(ax[1])
    plt.legend(frameon=False, loc=1)
    
    plt.tight_layout()
    plt.savefig('../plots/vr_profile.png')

def relative_vr():
    """Plot member velocities relative to the pickled stream fit (degree 2),
    separating stream and spur; saves ../plots/relative_vr.png."""

    t = Table.read('../data/members_catalog.fits')
    spur = (t['field']==2) | (t['field']==4) | (t['field']==6) | (t['field']==5)
    stream = ~spur
    
    pkl = pickle.load(open('../data/polyfit_rv_2.pkl', 'rb'))
    #p = pkl['p_spur']
    p = pkl['p_stream']
    print(p)
    poly = np.poly1d(p)
    dvr = t['Vrad'] - poly(t['phi1'])
    
    indices = [stream, spur]
    colors = ['darkorange', 'navy', ]
    labels = ['Stream', 'Spur']

    plt.close()
    plt.figure(figsize=(10,5))
    
    plt.axhline(0, color='k', alpha=0.5, lw=2)
    
    for e, ind in enumerate(indices):
        plt.plot(t['phi1'][ind], dvr[ind], 'o', color=colors[e], label=labels[e])
        plt.errorbar(t['phi1'][ind], dvr[ind], yerr=(t['lerr_Vrad'][ind], t['uerr_Vrad'][ind]), fmt='none', color=colors[e], zorder=0, lw=1.5, label='')
    
    plt.legend(loc=1, frameon=False)
    plt.xlabel('$\phi_1$ [deg]')
    plt.ylabel('$V_r$ - $V_{r, stream\;fit}$ [km s$^{-1}$]')
    plt.tight_layout()
    plt.savefig('../plots/relative_vr.png')


def afeh_comparison(priority=1):
    """Compare [Fe/H]-[alpha/Fe] distributions of stream vs. spur members at
    the given priority cut, with marginal histograms; saves
    ../plots/afeh_comparison.<priority>.png."""
    t = Table.read('../data/master_catalog.fits')
    ind = (t['priority']<=priority) & (t['delta_Vrad']<0) & (t['delta_Vrad']>-20)
    
    spur = (t['field']==2) | (t['field']==4) | (t['field']==5) | (t['field']==6)
    stream = ~spur
    
    bins_feh = np.linspace(-3,0,40)
    bins_afe = np.linspace(-0.2,0.6,15)
    
    plt.close()
    fig, ax = plt.subplots(2,2,figsize=(11,5), sharex='col', sharey='row', gridspec_kw = {'width_ratios':[5, 1], 'height_ratios':[1,4], 'hspace':0, 'wspace':0})
    
    plt.sca(ax[0][1])
    plt.axis('off')
    
    plt.sca(ax[1][0])
    plt.plot(t['FeH'], t['aFe'], 'ko', ms=3, zorder=0, label='All targeted')

    tmem = t[ind & stream]
    plt.plot(tmem['FeH'], tmem['aFe'], 'ro', label='Stream')
    plt.errorbar(tmem['FeH'], tmem['aFe'], xerr=(tmem['lerr_FeH'], tmem['uerr_FeH']), yerr=(tmem['lerr_aFe'], tmem['uerr_aFe']), fmt='none', color='r', lw=0.5, label='')

    tmem = t[ind & spur]
    plt.plot(tmem['FeH'], tmem['aFe'], 'bo', label='Spur')
    plt.errorbar(tmem['FeH'], tmem['aFe'], xerr=(tmem['lerr_FeH'], tmem['uerr_FeH']), yerr=(tmem['lerr_aFe'], tmem['uerr_aFe']), fmt='none', color='b', lw=0.5, label='')

    
    plt.legend(loc=4, fontsize='small')
    plt.xlabel('[Fe/H]')
    plt.ylabel('[$\\alpha$/Fe]')
    
    plt.xlim(-3,0)
    plt.ylim(-0.2, 0.6)
    #plt.gca().set_aspect('equal')
    
    # marginal [Fe/H] histogram (top)
    plt.sca(ax[0][0])
    plt.hist(t['FeH'], bins=bins_feh, color='k', alpha=0.3, density=True)

    tmem = t[ind & stream]
    plt.hist(tmem['FeH'], bins=bins_feh, color='r', alpha=0.3, density=True)

    tmem = t[ind & spur]
    plt.hist(tmem['FeH'], bins=bins_feh, color='b', alpha=0.3, density=True)
    plt.axis('off')
    
    # marginal [alpha/Fe] histogram (right)
    plt.sca(ax[1][1])
    plt.hist(t['aFe'], bins=bins_afe, color='k', alpha=0.3, density=True, orientation='horizontal')
    
    tmem = t[ind & stream]
plt.hist(tmem['aFe'], bins=bins_afe, color='r', alpha=0.3, density=True, orientation='horizontal')\n \n tmem = t[ind & spur]\n plt.hist(tmem['aFe'], bins=bins_afe, color='b', alpha=0.3, density=True, orientation='horizontal')\n plt.axis('off')\n \n plt.tight_layout(h_pad=0, w_pad=0)\n plt.savefig('../plots/afeh_comparison.{:d}.png'.format(priority))\n\n\ndef vr_median():\n \"\"\"\"\"\"\n \n t = Table.read('../data/master_catalog.fits')\n ind = (t['priority']<=3) & (t['delta_Vrad']<-1) & (t['delta_Vrad']>-20) & (t['FeH']<-2)\n t = t[ind]\n \n M = 1000\n fields = np.unique(t['field'])\n N = np.size(fields)\n phi1 = np.zeros(N)\n phi2 = np.zeros(N)\n sigma = np.zeros(N)\n\n v_med = np.zeros(N)\n v_std = np.zeros(N)\n sigma_med = np.zeros(N)\n sigma_std = np.zeros(N)\n \n for e in range(N):\n ind = t['field']==fields[e]\n t_ = t[ind]\n Nstar = len(t_)\n phi1[e] = np.median(t_['phi1'])\n phi2[e] = np.median(t_['phi2'])\n \n # subtract local gradient\n p = np.polyfit(t_['phi1'], t_['Vrad'], 1, w=t_['std_Vrad']**-1)\n poly = np.poly1d(p)\n \n dvr = t_['Vrad'] - poly(t_['phi1'])\n sigma[e] = np.std(dvr)\n\n vrad = np.random.randn(Nstar*M).reshape(Nstar,M) + t_['Vrad'][:,np.newaxis]\n dvr = vrad - poly(t_['phi1'])[:,np.newaxis]\n \n vs = np.median(vrad, axis=0)\n v_med[e] = np.median(vs)\n v_std[e] = np.std(vs)\n \n sigmas = np.std(dvr, axis=0)\n sigma_med[e] = np.median(sigmas)\n sigma_std[e] = np.std(sigmas)\n \n q = np.polyfit(phi1, v_med, 2, w=v_std**-1)\n qpoly = np.poly1d(q)\n dv_med = v_med - qpoly(phi1)\n np.save('../data/poly_vr_median', q)\n \n spur = (fields==2) | (fields==4) | (fields==5) | (fields==6)\n stream = ~spur\n \n np.savez('../data/field_vr_summary', phi1=phi1, phi2=phi2, dv_med=dv_med, v_std=v_std, sigma_med=sigma_med, sigma_std=sigma_std, fields=fields, spur=spur, stream=stream)\n \n spur_global = (t['field']==2) | (t['field']==4) | (t['field']==5) | (t['field']==6)\n stream_global = ~spur_global\n labels = ['Stream', 'Spur']\n colors = 
['deepskyblue', 'midnightblue']\n colors = [mpl.cm.Blues(0.6), mpl.cm.Blues(0.85)]\n \n plt.close()\n fig, ax = plt.subplots(2,1,figsize=(6,6), sharex=True)\n \n plt.sca(ax[0])\n for e, ind in enumerate([stream_global, spur_global]):\n plt.plot(t['phi1'][ind], t['Vrad'][ind], 'o', label=labels[e], color=colors[e])\n plt.errorbar(t['phi1'][ind], t['Vrad'][ind], yerr=t['std_Vrad'][ind], fmt='none', color=colors[e], label='')\n \n x0, x1 = plt.gca().get_xlim()\n y0, y1 = plt.gca().get_ylim()\n x_ = np.linspace(x0, x1, 100)\n y_ = qpoly(x_)\n plt.plot(x_, y_, 'k-', alpha=0.3, lw=2, zorder=0)\n \n plt.xlim(x0, x1)\n plt.ylim(y0, y1)\n plt.legend(frameon=False, loc=1)\n plt.ylabel('$V_r$ [km s$^{-1}$]')\n plt.text(0.05, 0.1, 'Individual members', transform=plt.gca().transAxes)\n \n plt.sca(ax[1])\n for e, ind in enumerate([stream, spur]):\n plt.plot(phi1[ind], dv_med[ind], 'o', color=colors[e], label=labels[e])\n plt.errorbar(phi1[ind], dv_med[ind], yerr=v_std[ind], fmt='none', color=colors[e], label='')\n \n plt.axhline(0, color='k', alpha=0.3, lw=2, zorder=0)\n plt.legend(frameon=False, loc=3)\n plt.ylabel('$\\Delta V_r$ [km s$^{-1}$]')\n plt.ylim(-3,3)\n #plt.text(0.05, 0.1, 'Field median', transform=plt.gca().transAxes)\n \n #plt.sca(ax[2])\n #for e, ind in enumerate([stream, spur]):\n ##plt.plot(phi1[ind], sigma[ind], 'o')\n #plt.plot(phi1[ind], sigma_med[ind], 'o', color=colors[e])\n #plt.errorbar(phi1[ind], sigma_med[ind], yerr=sigma_std[ind], fmt='none', color=colors[e])\n \n plt.xlabel('$\\phi_1$ [deg]')\n #plt.ylabel('$\\sigma_{V_r}$ [km s$^{-1}$]')\n #plt.text(0.05, 0.1, 'Field dispersion', transform=plt.gca().transAxes)\n plt.tight_layout(h_pad=0.2)\n plt.savefig('../plots/kinematic_profile_median.png', dpi=200)\n \ndef vr_dispersion():\n \"\"\"\"\"\"\n \n t = Table.read('../data/master_catalog.fits')\n #ind = (t['priority']<=3) & (t['delta_Vrad']<-1) & (t['delta_Vrad']>-20) & (t['FeH']<-2)\n ind = (t['priority']<=3) & (t['delta_Vrad']>-20) & 
(t['delta_Vrad']<-1) & (t['FeH']<-2) & (-t['lnL'] < 2.5E3+t['SNR']**2.4) & (t['SNR']>2)\n t = t[ind]\n \n M = 1000\n fields = np.unique(t['field'])\n N = np.size(fields)\n phi1 = np.zeros(N)\n sigma = np.zeros(N)\n\n v_med = np.zeros(N)\n v_std = np.zeros(N)\n sigma_med = np.zeros(N)\n sigma_std = np.zeros(N)\n \n for e in range(N):\n ind = t['field']==fields[e]\n t_ = t[ind]\n Nstar = len(t_)\n phi1[e] = np.median(t_['phi1'])\n \n # subtract local gradient\n p = np.polyfit(t_['phi1'], t_['Vrad'], 1, w=t_['std_Vrad']**-1)\n poly = np.poly1d(p)\n \n dvr = t_['Vrad'] - poly(t_['phi1'])\n sigma[e] = np.std(dvr)\n\n vrad = np.random.randn(Nstar*M).reshape(Nstar,M) + t_['Vrad'][:,np.newaxis]\n dvr = vrad - poly(t_['phi1'])[:,np.newaxis]\n \n vs = np.median(vrad, axis=0)\n v_med[e] = np.median(vs)\n v_std[e] = np.std(vs)\n \n sigmas = np.std(dvr, axis=0)\n sigma_med[e] = np.median(sigmas)\n sigma_std[e] = np.std(sigmas)\n \n q = np.polyfit(phi1, v_med, 2, w=v_std**-1)\n qpoly = np.poly1d(q)\n dv_med = v_med - qpoly(phi1)\n \n spur = (fields==2) | (fields==4) | (fields==5) | (fields==6)\n stream = ~spur\n \n spur_global = (t['field']==2) | (t['field']==4) | (t['field']==5) | (t['field']==6)\n stream_global = ~spur_global\n labels = ['Stream', 'Spur']\n colors = ['deepskyblue', 'midnightblue']\n colors = [mpl.cm.Blues(0.6), mpl.cm.Blues(0.85)]\n \n plt.close()\n fig, ax = plt.subplots(3,1,figsize=(10,9), sharex=True)\n \n plt.sca(ax[0])\n for e, ind in enumerate([stream_global, spur_global]):\n plt.plot(t['phi1'][ind], t['Vrad'][ind], 'o', label=labels[e], color=colors[e])\n plt.errorbar(t['phi1'][ind], t['Vrad'][ind], yerr=t['std_Vrad'][ind], fmt='none', color=colors[e], label='')\n \n x0, x1 = plt.gca().get_xlim()\n y0, y1 = plt.gca().get_ylim()\n x_ = np.linspace(x0, x1, 100)\n y_ = qpoly(x_)\n plt.plot(x_, y_, 'k-', alpha=0.3, lw=2, zorder=0)\n \n plt.xlim(x0, x1)\n plt.ylim(y0, y1)\n plt.legend(frameon=False, loc=1)\n plt.ylabel('$V_r$ [km s$^{-1}$]')\n plt.text(0.05, 
0.1, 'Individual members', transform=plt.gca().transAxes)\n plt.ylim(-100,-10)\n \n plt.sca(ax[1])\n for e, ind in enumerate([stream, spur]):\n plt.plot(phi1[ind], dv_med[ind], 'o', color=colors[e])\n plt.errorbar(phi1[ind], dv_med[ind], yerr=v_std[ind], fmt='none', color=colors[e])\n \n plt.axhline(0, color='k', alpha=0.3, lw=2, zorder=0)\n plt.ylabel('$\\Delta V_r$ [km s$^{-1}$]')\n plt.ylim(-3,3)\n plt.text(0.05, 0.1, 'Field median', transform=plt.gca().transAxes)\n \n plt.sca(ax[2])\n for e, ind in enumerate([stream, spur]):\n #plt.plot(phi1[ind], sigma[ind], 'o')\n plt.plot(phi1[ind], sigma_med[ind], 'o', color=colors[e])\n plt.errorbar(phi1[ind], sigma_med[ind], yerr=sigma_std[ind], fmt='none', color=colors[e])\n \n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\sigma_{V_r}$ [km s$^{-1}$]')\n plt.ylim(0,5)\n plt.minorticks_on()\n plt.text(0.05, 0.1, 'Field dispersion', transform=plt.gca().transAxes)\n plt.tight_layout(h_pad=0.2)\n plt.savefig('../plots/kinematic_profile.png')\n\ndef cmd_members():\n \"\"\"\"\"\"\n t = Table.read('../data/master_catalog.fits')\n ind = (t['priority']<=3) & (t['delta_Vrad']<-1) & (t['delta_Vrad']>-20) & (t['FeH']<-2)\n t = t[ind]\n \n spur = (t['field']==2) | (t['field']==4) | (t['field']==5) | (t['field']==6)\n stream = ~spur\n labels = ['Stream', 'Spur']\n colors = [mpl.cm.Blues(0.6), mpl.cm.Blues(0.85)]\n \n plt.close()\n plt.figure(figsize=(5,8))\n \n for e, ind in enumerate([stream, spur]):\n plt.plot(t['g'][ind] - t['i'][ind], t['g'][ind], 'o', color=colors[e])\n \n plt.gca().invert_yaxis()\n plt.tight_layout()\n\ndef snr_members(verbose=False):\n \"\"\"\"\"\"\n t = Table.read('../data/master_catalog.fits')\n ind = (t['priority']<=3) & (t['delta_Vrad']<-1) & (t['delta_Vrad']>-20) & (t['FeH']<-2)\n t = t[ind]\n \n spur = (t['field']==2) | (t['field']==4) | (t['field']==5) | (t['field']==6)\n stream = ~spur\n labels = ['Stream', 'Spur']\n colors = [mpl.cm.Blues(0.6), mpl.cm.Blues(0.85)]\n \n if verbose:\n print(t.colnames)\n 
ind = t['SNR']>10\n print(t['starname'][ind])\n print(t['field'][ind])\n \n plt.close()\n plt.figure(figsize=(8,6))\n \n for e, ind in enumerate([stream, spur]):\n plt.plot(t['g'][ind], t['SNR'][ind], 'o', color=colors[e], label=labels[e])\n \n plt.legend(loc=1, frameon=False)\n plt.xlabel('g [mag]')\n plt.ylabel('S/N')\n \n plt.tight_layout()\n plt.savefig('../plots/snr_members.png')\n\ndef afeh_members():\n \"\"\"\"\"\"\n t = Table.read('../data/master_catalog.fits')\n ind = (t['priority']<=3) & (t['delta_Vrad']<-1) & (t['delta_Vrad']>-20) & (t['FeH']<-2)\n t = t[ind]\n \n spur = (t['field']==2) | (t['field']==4) | (t['field']==5) | (t['field']==6)\n stream = ~spur\n labels = ['Stream', 'Spur']\n colors = [mpl.cm.Blues(0.6), mpl.cm.Blues(0.85)]\n \n plt.close()\n fig, ax = plt.subplots(1,2,figsize=(11,6), sharey=True)\n \n for e, ind in enumerate([stream, spur]):\n plt.sca(ax[0])\n plt.plot(t['FeH'][ind], t['aFe'][ind], 'o', color=colors[e], label=labels[e])\n plt.errorbar(t['FeH'][ind], t['aFe'][ind], xerr=t['std_FeH'][ind], yerr=t['std_aFe'][ind], fmt='none', color=colors[e], label='', lw=0.5)\n \n plt.sca(ax[1])\n isnr = t['SNR']>10\n plt.plot(t['FeH'][ind & isnr], t['aFe'][ind & isnr], 'o', color=colors[e], label=labels[e])\n plt.errorbar(t['FeH'][ind & isnr], t['aFe'][ind & isnr], xerr=t['std_FeH'][ind & isnr], yerr=t['std_aFe'][ind & isnr], fmt='none', color=colors[e], label='', lw=0.5)\n \n plt.sca(ax[0])\n plt.xlabel('[Fe/H]')\n plt.ylabel('[$\\\\alpha$/Fe]')\n \n plt.sca(ax[1])\n plt.legend(loc=1, frameon=False)\n plt.xlabel('[Fe/H]')\n plt.title('S/N>10', fontsize='medium')\n \n plt.tight_layout()\n plt.savefig('../plots/afeh_members.png')\n\ndef distance_members():\n \"\"\"\"\"\"\n \n t = Table.read('../data/master_catalog.fits')\n ind = (t['priority']<=3) & (t['delta_Vrad']<-1) & (t['delta_Vrad']>-20) & (t['FeH']<-2)\n t = t[ind]\n \n spur = (t['field']==2) | (t['field']==4) | (t['field']==5) | (t['field']==6)\n stream = ~spur\n labels = ['Stream', 
'Spur']\n colors = [mpl.cm.Blues(0.6), mpl.cm.Blues(0.85)]\n \n plt.close()\n plt.figure(figsize=(10, 5))\n \n for e, ind in enumerate([stream, spur]):\n plt.plot(t['phi1'][ind], t['Dist'][ind], 'o', color=colors[e])\n plt.errorbar(t['phi1'][ind], t['Dist'][ind], yerr=t['std_Dist'][ind], fmt='none', color=colors[e])\n \n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('Distance [kpc]')\n plt.ylim(0,13)\n \n plt.tight_layout()\n plt.savefig('../plots/distance_members.png')\n\n\nlightsteelblue = '#dde3ef'\nsteelblue = '#a2b3d2'\nnavyblue = '#294882'\nfuchsia = '#ff3643'\n\ndef plot_membership():\n \"\"\"Plot likely members and their selection in the CMD, radial velocity and chemical space\"\"\"\n \n t = Table.read('../data/master_catalog.fits')\n ind = (-t['lnL'] < 2.5E3+t['SNR']**2.4) & (t['SNR']>3) & np.isfinite(t['aFe'])\n t = t[ind]\n \n spur = (t['field']==2) | (t['field']==4) | (t['field']==5) | (t['field']==6)\n stream = ~spur\n\n mem_dict = get_members(t, full=True)\n cmdmem = mem_dict['cmdmem']\n pmmem = mem_dict['pmmem']\n vrmem = mem_dict['vrmem']\n fehmem = mem_dict['fehmem']\n vrlims = mem_dict['vrlims']\n fehlims = mem_dict['fehlims']\n mem = mem_dict['mem']\n #mem = pmmem & vrmem\n \n print(np.sum(pmmem & cmdmem), np.sum(pmmem & cmdmem & vrmem), np.sum(mem_dict['mem']))\n \n bvr = np.linspace(-50,50,50)\n \n plt.close()\n \n fig = plt.figure(figsize=(11.25,8.1))\n gs1 = mpl.gridspec.GridSpec(1,3)\n gs1.update(left=0.08, right=0.975, top=0.95, bottom=0.6, wspace=0.25)\n \n gs2 = mpl.gridspec.GridSpec(1,1)\n gs2.update(left=0.08, right=0.975, top=0.47, bottom=0.08)\n\n ax0 = fig.add_subplot(gs1[0])\n ax1 = fig.add_subplot(gs1[1])\n ax2 = fig.add_subplot(gs1[2])\n ax3 = fig.add_subplot(gs2[0])\n ax = [ax0, ax1, ax2, ax3]\n \n #fig, ax = plt.subplots(1, 3, figsize=(15,5.5)) #, gridspec_kw={'width_ratios': [1,1.7,3.2]})\n \n plt.sca(ax[0])\n prelim_mem = pmmem & ~mem\n plt.plot(t['pm_phi1_cosphi2'], t['pm_phi2'], 'o', color=lightsteelblue, mec='none', ms=3, 
alpha=1, label='Field stars')\n plt.plot(t['pm_phi1_cosphi2'][prelim_mem], t['pm_phi2'][prelim_mem], 'o', color=steelblue, mec='none', ms=6, alpha=1, label='Preliminary\\nGD-1 members')\n plt.plot(t['pm_phi1_cosphi2'][mem & stream], t['pm_phi2'][mem & stream], 'o', color=navyblue, mec='none', ms=6, label='GD-1 stream\\nmembers')\n plt.plot(t['pm_phi1_cosphi2'][mem & spur], t['pm_phi2'][mem & spur], '*', color=navyblue, mec='none', ms=10, label='GD-1 spur\\nmembers')\n \n pm = mpl.patches.Polygon(mem_dict['pmbox'], facecolor='none', edgecolor=fuchsia, lw=3, ls='--', zorder=2)\n plt.gca().add_artist(pm)\n \n #plt.legend(fontsize='small', loc=4, handlelength=0.75)\n plt.xlim(-12,2)\n plt.ylim(-5,5)\n plt.xlabel('$\\mu_{\\phi_1}$ [mas yr$^{-1}$]')\n plt.ylabel('$\\mu_{\\phi_2}$ [mas yr$^{-1}$]')\n plt.title('Proper motion', fontsize='medium')\n plt.text(0.1, 0.9, '{:2d}'.format(np.sum(pmmem)), transform=plt.gca().transAxes, ha='left')\n \n plt.sca(ax[1])\n prelim_mem = pmmem & ~mem\n plt.plot(t['g'] - t['i'], t['g'], 'o', color=lightsteelblue, mec='none', ms=3, alpha=1)\n plt.plot(t['g'][prelim_mem] - t['i'][prelim_mem], t['g'][prelim_mem], 'o', color=steelblue, mec='none', ms=6, alpha=1)\n #plt.plot(t['g'][pmmem & stream] - t['i'][pmmem & stream], t['g'][pmmem & stream], 'o', color=navyblue, mec='none', ms=5)\n #plt.plot(t['g'][pmmem & spur] - t['i'][pmmem & spur], t['g'][pmmem & spur], '*', color=navyblue, mec='none', ms=9)\n \n plt.plot(t['g'][mem & stream] - t['i'][mem & stream], t['g'][mem & stream], 'o', color=navyblue, mec='none', ms=6)\n plt.plot(t['g'][mem & spur] - t['i'][mem & spur], t['g'][mem & spur], '*', color=navyblue, mec='none', ms=10)\n #plt.plot(t['g'][mem] - t['i'][mem], t['g'][mem], 'o', color=navyblue, mec='none', ms=5)\n pm = mpl.patches.Polygon(mem_dict['cmdbox'], facecolor='none', edgecolor=fuchsia, lw=3, ls='--', zorder=2)\n plt.gca().add_artist(pm)\n \n plt.xlim(-0.5,1.5)\n plt.xlim(-0.1,1.1)\n plt.ylim(20.6,14.5)\n plt.xlabel('(g - i)$_0$ 
[mag]')\n plt.ylabel('g$_0$ [mag]')\n plt.title('+ Isochrone', fontsize='medium')\n plt.text(0.1, 0.9, '{:2d}'.format(np.sum(cmdmem & pmmem)), transform=plt.gca().transAxes, ha='left')\n \n plt.sca(ax[2])\n prelim_mem = pmmem & cmdmem & ~mem\n plt.hist(t['delta_Vrad'][~cmdmem & ~pmmem], bins=bvr, histtype='stepfilled', color=lightsteelblue, alpha=1, density=False)\n plt.hist(t['delta_Vrad'][prelim_mem], bins=bvr, histtype='stepfilled', color=steelblue, density=False)\n #plt.hist(t['delta_Vrad'][pmmem & cmdmem], bins=bvr, histtype='stepfilled', color=navyblue, density=False)\n plt.hist(t['delta_Vrad'][mem], bins=bvr, histtype='stepfilled', color=navyblue, density=False)\n \n for vrlim in vrlims:\n plt.axvline(vrlim, ls='--', lw=3, color=fuchsia)\n \n plt.xlim(-50,50)\n plt.ylabel('Number')\n plt.xlabel('$V_r$ - $V_{r,orbit}$ [km s$^{-1}$]')\n plt.title('+ Radial velocity', fontsize='medium')\n plt.text(0.1, 0.9, '{:2d}'.format(np.sum(pmmem & cmdmem & vrmem)), transform=plt.gca().transAxes, ha='left')\n \n plt.sca(ax[3])\n prelim_mem = pmmem & cmdmem & vrmem & ~mem\n #plt.plot(t['FeH'][pmmem & cmdmem & vrmem], t['aFe'][pmmem & cmdmem & vrmem], 'o', color=navyblue, mec='none', ms=6, label='GD-1 members', zorder=1)\n #plt.plot(t['FeH'][mem], t['aFe'][mem], 'o', color=navyblue, mec='none', ms=6, label='GD-1 members', zorder=1)\n\n plt.plot(t['FeH'][~(cmdmem & vrmem)], t['aFe'][~(cmdmem & vrmem)], 'o', color=lightsteelblue, mec='none', alpha=1, ms=4, label='Field stars', zorder=0)\n plt.plot(t['FeH'][prelim_mem], t['aFe'][prelim_mem], 'o', color=steelblue, mec='none', alpha=1, ms=7, zorder=0, label='Preliminary GD-1 members')\n plt.plot(t['FeH'][mem & stream], t['aFe'][mem & stream], 'o', color=navyblue, mec='none', ms=7, label='GD-1 stream members', zorder=1)\n plt.plot(t['FeH'][mem & spur], t['aFe'][mem & spur], '*', color=navyblue, mec='none', ms=12, label='GD-1 spur members', zorder=1)\n\n \n for fehlim in fehlims:\n plt.axvline(fehlim, ls='--', lw=3, color=fuchsia, 
label='', zorder=2)\n \n #plt.text(0.97, 0.9, '{:2d}'.format(np.sum(pmmem & cmdmem & vrmem & fehmem)), transform=plt.gca().transAxes, ha='right')\n plt.text(0.03, 0.9, '{:2d}'.format(np.sum(pmmem & cmdmem & vrmem & fehmem)), transform=plt.gca().transAxes, ha='left')\n plt.legend(loc=1, frameon=True, handlelength=1, fontsize='medium', markerscale=1.2)\n \n plt.xlim(-3.2,0.1)\n plt.ylim(-0.2,0.6)\n plt.ylabel('[$\\\\alpha$/Fe]')\n plt.xlabel('[Fe/H]')\n plt.title('+ Metallicity selection', fontsize='medium')\n\n #plt.tight_layout(w_pad=0.1)\n plt.savefig('../paper/members.pdf')\n\ndef get_members(t, full=False):\n \"\"\"Return indices of GD-1 members\"\"\"\n \n # cmd selection\n cmdlim = 3\n cmdmem = t['priority']<=cmdlim\n \n #iso = Table.read('/home/ana/data/isochrones/panstarrs/mist_12.6_-1.50.cmd', format='ascii.commented_header', header_start=12)\n iso = Table.read('/home/ana/data/isochrones/panstarrs/mist_12.6_-2.30.cmd', format='ascii.commented_header', header_start=12)\n phasecut = (iso['phase']>=0) & (iso['phase']<3)\n iso = iso[phasecut]\n\n # distance modulus\n distance_app = 7.8*u.kpc\n distance_app = 8.5*u.kpc\n dm = 5*np.log10((distance_app.to(u.pc)).value)-5\n\n # main sequence + rgb\n i_gi = iso['PS_g']-iso['PS_i'] + 0.1\n i_g = iso['PS_g']+dm\n\n i_left_narrow = i_gi - 0.47*(i_g/28)**5\n i_right_narrow = i_gi + 0.57*(i_g/28)**5\n #i_left_narrow = i_gi - 0.4*(i_g/28)**5\n #i_right_narrow = i_gi + 0.5*(i_g/28)**5\n poly_narrow = np.hstack([np.array([i_left_narrow, i_g]), np.array([i_right_narrow[::-1], i_g[::-1]])]).T\n\n i_left_wide = i_gi - 0.5*(i_g/28)**3\n i_right_wide = i_gi + 0.6*(i_g/28)**3\n #i_left_wide = i_gi - 0.6*(i_g/28)**3\n #i_right_wide = i_gi + 0.7*(i_g/28)**3\n poly_wide = np.hstack([np.array([i_left_wide, i_g]), np.array([i_right_wide[::-1], i_g[::-1]])]).T\n\n ind = (poly_wide[:,1]<18.3) & (poly_wide[:,1]>14)\n poly_low = poly_wide[ind]\n\n ind = (poly_narrow[:,1]<20.6) & (poly_narrow[:,1]>18.3)\n poly_med = poly_narrow[ind]\n \n # 
manual union\n nhalf_low = int(np.shape(poly_low)[0]/2)\n nhalf_med = int(np.shape(poly_med)[0]/2)\n cmdbox = np.vstack([poly_low[nhalf_low:,:], poly_med[nhalf_med:,:], poly_med[:nhalf_med,:], poly_low[:nhalf_low,:]])\n\n #cmdbox = poly_wide\n points = np.array([t['g'] - t['i'], t['g']]).T\n path_mem = mpl.path.Path(cmdbox)\n cmdmem = path_mem.contains_points(points) #| (t['g'] - t['i']<0.01)\n \n # radial velocity selection\n #vrlims = np.array([-20, -1])\n vrlims = np.array([-7,7])\n #vrlims = np.array([-10,10])\n vrmem = (t['delta_Vrad']>vrlims[0]) & (t['delta_Vrad']<vrlims[1])\n \n # feh selection\n ##fehlims = np.array([-2.8, -1.9])\n ##fehlims = np.array([-2.8, -2.1])\n #fehlims = np.array([-2.55, -1.8])\n #fehlims = np.array([-2.8, -1.8])\n #fehmem = (t['FeH']>fehlims[0]) & (t['FeH']<fehlims[1]) #& (t['aFe']>0.2)\n\n fehlims = np.array([-2.37, -1.75])\n #fehlims = np.array([-2.37, -1.83])\n #fehlims = np.array([-2.7, -1.7])\n ##fehlims = np.array([-2.3, -1.9])\n ##fehlims = np.array([-2.37, -1.83])\n ##fehlims = np.array([-2.2, -1.8])\n #fehlims = np.array([-2.3, -1.7])\n fehmem = (t['init_FeH']>fehlims[0]) & (t['init_FeH']<fehlims[1]) #& (t['init_aFe']<0.2)\n \n # pm selection\n pm1lims = np.array([-9,-4.5])\n pm2lims = np.array([-1.7,1])\n pm1lims = np.array([-9,-6.5])\n \n pm1lims = np.array([-10,-5.5])\n pm2lims = np.array([-2.5,2])\n \n pmmem = (t['pm_phi1_cosphi2']>pm1lims[0]) & (t['pm_phi1_cosphi2']<pm1lims[1]) & (t['pm_phi2']>pm2lims[0]) & (t['pm_phi2']<pm2lims[1])\n pmbox = np.array([[pm1lims[0], pm2lims[0]],[pm1lims[0], pm2lims[1]], [pm1lims[1], pm2lims[1]], [pm1lims[1], pm2lims[0]]])\n #print(np.shape(cmdbox), np.shape(pmbox))\n \n members = cmdmem & pmmem & vrmem & fehmem\n \n if full:\n return_dict = {'mem': members, 'cmdmem': cmdmem, 'pmmem': pmmem, 'vrmem': vrmem, 'fehmem': fehmem, 'cmdlim': cmdlim, 'cmdbox': cmdbox, 'vrlims': vrlims, 'fehlims': fehlims, 'pmbox': pmbox}\n return return_dict\n else:\n return members\n\ndef 
save_members(verbose=True):\n \"\"\"\"\"\"\n t = Table.read('../data/master_catalog.fits')\n mem = get_members(t)\n t = t[mem]\n \n if verbose:\n t.pprint()\n t.write('../data/members_catalog.fits', overwrite=True)\n\ndef print_members_field():\n \"\"\"\"\"\"\n t = Table.read('../data/master_catalog.fits')\n mem = get_members(t)\n t = t[mem]\n \n print(t.colnames)\n \n for i in range(8):\n ind = t['field']==i+1\n t_ = t[ind]\n print(i+1, np.array(t_['name']))\n \n\n\ndef spur_dvr_dep():\n \"\"\"Plot how delta radial velocity in the spur depends on various properties\"\"\"\n t = Table.read('../data/master_catalog.fits')\n mem = get_members(t)\n t = t[mem]\n \n #print(t.colnames, len(t))\n spurfields = [2,4,5,6]\n ind = np.array([t['field']==x for x in spurfields])\n ind_spur = np.sum(ind, axis=0, dtype=bool)\n #t = t[ind_spur]\n fields = np.unique(t['field'])\n \n params = ['logg', 'Teff', 'FeH', 'aFe']\n \n plt.close()\n fig, ax = plt.subplots(4,1,figsize=(8,12), sharex=True, sharey=True)\n \n #for f in fields:\n #t_ = t[t['field']==f]\n ##plt.scatter(t_['phi1'] - np.median(t_['phi1']), t_['Vrad'] - np.median(t_['Vrad']), c=t_['logg'], s=20, cmap='magma', vmin=3, vmax=5)\n #plt.scatter(t_['phi1'] - np.median(t_['phi1']), t_['Vrad'] - np.median(t_['Vrad']), c=t_['Teff'], s=20, cmap='magma')\n #plt.scatter(t['phi1'], t['delta_Vrad'], c=t['logg'], s=40, cmap='magma', vmin=3, vmax=5)\n for i in range(4):\n plt.sca(ax[i])\n plt.scatter(t['phi1'], t['Vrad'], c=t[params[i]], s=t['SNR']*8, cmap='magma') #, vmin=3, vmax=5)\n \n #plt.sca(ax[1])\n #plt.scatter(t['phi1'], t['Vrad'], c=t['Teff'], s=t['SNR']*5, cmap='magma') #, vmin=3, vmax=5)\n \n plt.tight_layout(h_pad=0)\n\n# proper motions\n\ndef pm_offset():\n \"\"\"\"\"\"\n t = Table.read('../data/master_catalog.fits')\n ind = (t['priority']<=5) & (t['delta_Vrad']<-1) & (t['delta_Vrad']>-20) & (t['FeH']<-2) #& (np.abs(t['pmra_error']/t['pmra'])<0.07)\n t = t[ind]\n \n spur = (t['field']==2) | (t['field']==4) | 
(t['field']==5) | (t['field']==6)\n stream = ~spur\n labels = ['Stream', 'Spur']\n colors = [mpl.cm.Blues(0.6), mpl.cm.Blues(0.85)]\n \n #print(np.median(np.abs(t['pmra_error']/t['pmra'])))\n #print(np.median(np.abs(t['pmdec_error']/t['pmdec'])))\n \n qra = np.polyfit(t['phi1'][stream], t['pm_phi1_cosphi2'][stream], 2, w=t['pmra_error'][stream]**-1)\n polyra = np.poly1d(qra)\n dra = t['pm_phi1_cosphi2'] #- polyra(t['phi1'])\n #dra = t['pmra']\n \n qdec = np.polyfit(t['phi1'][stream], t['pm_phi2'][stream], 2, w=t['pmdec_error'][stream]**-1)\n polydec = np.poly1d(qdec)\n ddec = t['pm_phi2'] #- polydec(t['phi1'])\n #ddec = t['pmdec']\n \n plt.close()\n fig, ax = plt.subplots(2,1,figsize=(12,8), sharex=True)\n \n for e, s in enumerate([stream, spur]):\n plt.sca(ax[0])\n plt.plot(t['phi1'][s], dra[s], 'o', color=colors[e])\n plt.errorbar(t['phi1'][s], dra[s], yerr=t['pmra_error'][s], color=colors[e], fmt='none')\n #plt.plot(t['phi1'][s], t['pmra'][s], 'o', color=colors[e])\n #plt.errorbar(t['phi1'][s], t['pmra'][s], yerr=t['pmra_error'][s], color=colors[e], fmt='none')\n \n #plt.axhline(0, color='k', alpha=0.5, lw=0.5)\n #plt.ylim(-2.5,2.5)\n #plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\Delta\\mu_{\\phi_1}$ [mas yr$^{-1}$]')\n \n plt.sca(ax[1])\n plt.plot(t['phi1'][s], ddec[s], 'o', color=colors[e])\n plt.errorbar(t['phi1'][s], ddec[s], yerr=t['pmdec_error'][s], color=colors[e], fmt='none')\n #plt.plot(t['phi1'][s], t['pmdec'][s], 'o', color=colors[e])\n #plt.errorbar(t['phi1'][s], t['pmdec'][s], yerr=t['pmdec_error'][s], color=colors[e], fmt='none')\n \n #plt.axhline(0, color='k', alpha=0.5, lw=0.5)\n #plt.ylim(-2.5,2.5)\n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\Delta\\mu_{\\phi_2}$ [mas yr$^{-1}$]')\n \n plt.tight_layout()\n plt.savefig('../plots/pm_offset.png')\n\n\n# payne version comparison\n\ndef version_match():\n \"\"\"Match stars ran through both versions of the pipeline\"\"\"\n \n t0 = Table.read('../data/master_catalog_v1.fits')\n t1 = 
Table.read('../data/master_catalog.fits')\n \n c0 = coord.SkyCoord(ra=t0['ra']*u.deg, dec=t0['dec']*u.deg)\n c1 = coord.SkyCoord(ra=t1['ra']*u.deg, dec=t1['dec']*u.deg)\n idx, d2d, d3d = c0.match_to_catalog_sky(c1)\n \ndef version_dvr(members=False):\n \"\"\"\"\"\"\n \n t0 = Table.read('../data/master_catalog_v1.fits')\n t1 = Table.read('../data/master_catalog.fits')\n \n if members:\n ind = t0['priority']<=2\n t0 = t0[ind]\n \n c0 = coord.SkyCoord(ra=t0['ra']*u.deg, dec=t0['dec']*u.deg)\n c1 = coord.SkyCoord(ra=t1['ra']*u.deg, dec=t1['dec']*u.deg)\n idx, d2d, d3d = c0.match_to_catalog_sky(c1)\n \n dvr = t0['Vrad'] - t1[idx]['Vrad']\n \n plt.close()\n fig, ax = plt.subplots(2,2, figsize=(16,8), sharex='col', sharey='row')\n \n # along the stream\n plt.sca(ax[0][0])\n plt.plot(t0['phi1'], dvr, 'ko')\n \n plt.ylabel('$\\Delta V_{r}$ [km s$^{-1}$]')\n \n plt.sca(ax[1][0])\n plt.plot(t0['phi1'], dvr, 'ko')\n plt.errorbar(t0['phi1'], dvr, yerr=t0['std_Vrad'], fmt='none', color='k', alpha=0.1)\n plt.axhline(0, color='r', lw=0.5, zorder=0)\n \n plt.ylim(-3,3)\n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\Delta V_{r}$ [km s$^{-1}$]')\n \n # as a function of SNR\n plt.sca(ax[0][1])\n plt.plot(t0['SNR'], dvr, 'ko')\n \n plt.sca(ax[1][1])\n plt.plot(t0['SNR'], dvr, 'ko')\n plt.errorbar(t0['SNR'], dvr, yerr=t0['std_Vrad'], fmt='none', color='k', alpha=0.1)\n plt.axhline(0, color='r', lw=0.5, zorder=0)\n \n plt.ylim(-3,3)\n plt.xlabel('S/N')\n\n plt.tight_layout()\n plt.savefig('../plots/version_dvr.png')\n\ndef version_phi1_dvr():\n \"\"\"\"\"\"\n \n t0 = Table.read('../data/master_catalog_v1.fits')\n ind = t0['priority']<=2\n t0 = t0[ind]\n t1 = Table.read('../data/master_catalog.fits')\n \n c0 = coord.SkyCoord(ra=t0['ra']*u.deg, dec=t0['dec']*u.deg)\n c1 = coord.SkyCoord(ra=t1['ra']*u.deg, dec=t1['dec']*u.deg)\n idx, d2d, d3d = c0.match_to_catalog_sky(c1)\n \n t = t1[idx]\n \n #t = Table.read('../data/members_catalog.fits')\n spur = (t['field']==2) | (t['field']==4) | 
(t['field']==6) | (t['field']==5)\n stream = ~spur\n \n pkl = pickle.load(open('../data/polyfit_rv_2.pkl', 'rb'))\n #p = pkl['p_spur']\n p = pkl['p_stream']\n print(p)\n poly = np.poly1d(p)\n dvr = t['Vrad'] - poly(t['phi1'])\n dvr0 = t0['Vrad'] - poly(t0['phi1'])\n \n print(np.shape(dvr), np.shape(dvr0))\n \n indices = [stream, spur]\n colors = ['darkorange', 'navy', ]\n labels = ['Stream', 'Spur']\n\n plt.close()\n plt.figure(figsize=(10,5))\n \n plt.axhline(0, color='k', alpha=0.5, lw=2)\n \n for e, ind in enumerate(indices):\n plt.plot(t['phi1'][ind], dvr[ind], 'o', color=colors[e], label=labels[e]+' new')\n plt.errorbar(t['phi1'][ind], dvr[ind], yerr=(t['lerr_Vrad'][ind], t['uerr_Vrad'][ind]), fmt='none', color=colors[e], zorder=0, lw=1.5, label='')\n \n plt.errorbar(t0['phi1'][ind], dvr0[ind], yerr=(t0['lerr_Vrad'][ind], t0['uerr_Vrad'][ind]), fmt='none', color=colors[e], zorder=0, lw=1.5, label='')\n plt.plot(t0['phi1'][ind], dvr0[ind], 'wo', mec=colors[e], label=labels[e]+' old')\n\n plt.ylim(-20,20)\n plt.legend(loc=1, frameon=False, ncol=2)\n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$V_r$ - $V_{r, stream\\;fit}$ [km s$^{-1}$]')\n \n plt.tight_layout()\n plt.savefig('../plots/version_phi_dvr.png')\n\ndef fiber_vr(expand=1):\n \"\"\"\"\"\"\n \n t = Table.read('../data/master_catalog.fits')\n ind = (t['priority']<=3) & (t['delta_Vrad']>-20) & (t['delta_Vrad']<0)\n t = t[ind]\n \n fields = np.unique(t['field'])\n #fields = [2,4,5]\n #fields = [5,7]\n \n plt.close()\n plt.figure(figsize=(8,10))\n \n for e, f in enumerate(fields):\n ind = t['field']==f\n t_ = t[ind]\n \n #plt.plot(t_['fibID'], t_['delta_Vrad'] + e*30, 'o', ms=10, label='{}'.format(f))\n plt.plot(t_['fibID'], t_['Vrad'] - np.median(t_['Vrad']) + e*expand*30, 'o', ms=10, label='{}'.format(f))\n\n plt.xlabel('Fiber ID')\n plt.ylabel('$V_r$ - $\\overline{V_r}$ [km s$^{-1}$]')\n \n #plt.legend()\n #plt.ylim(-30,30)\n #plt.ylim(-10,10)\n plt.tight_layout()\n 
plt.savefig('../plots/fiber_vr_{}.png'.format(expand))\n \ndef fiber_vr_april():\n \"\"\"\"\"\"\n \n t = Table.read('../data/master_catalog.fits')\n ind = (t['priority']<=3) & (t['delta_Vrad']>-20) & (t['delta_Vrad']<0)\n t = t[ind]\n print(t.colnames)\n print(t['name'])\n\n fields = [5,7]\n \n plt.close()\n plt.figure(figsize=(8,8))\n \n for e, f in enumerate(fields):\n ind = t['field']==f\n t_ = t[ind]\n print(np.array(t_['name']))\n #print(np.array(t_['']))\n \n #plt.plot(t_['fibID'], t_['delta_Vrad'] + e*30, 'o', ms=10, label='{}'.format(f))\n plt.plot(t_['fibID'], t_['Vrad'] - 0*np.median(t_['Vrad']) + e*0, 'o', ms=10, label='{}'.format(f))\n\n #plt.legend()\n #plt.ylim(-30,30)\n #plt.ylim(-10,10)\n plt.xlabel('Fiber ID')\n plt.ylabel('$V_r$ - $\\overline{V_r}$ [km s$^{-1}$]')\n \n plt.tight_layout()\n plt.savefig('../plots/fiber_vr_april.png')\n\ndef field_snr():\n \"\"\"\"\"\"\n \n t = Table.read('../data/master_catalog.fits')\n #print(t.colnames)\n \n fields = np.unique(t['field'])\n #fields = [2,4,5]\n \n plt.close()\n plt.figure(figsize=(8,8))\n \n for e, f in enumerate(fields):\n ind = t['field']==f\n t_ = t[ind]\n #print(t_['SNR'])\n #print(t_['phot_g_mean_mag'])\n \n plt.plot(t_['phot_g_mean_mag'], t_['SNR'], 'o', ms=4, label='{}'.format(f))\n \n plt.gca().set_yscale('log')\n plt.legend(markerscale=2)\n plt.xlabel('G')\n plt.ylabel('S/N')\n\n plt.tight_layout()\n plt.savefig('../plots/field_snr.png')\n\ndef phi1_vr_fiber():\n \"\"\"\"\"\"\n \n t = Table.read('../data/master_catalog.fits')\n ind = (t['priority']<=3) & (t['delta_Vrad']>-20) & (t['delta_Vrad']<0)\n t = t[ind]\n \n fields = np.unique(t['field'])\n \n plt.close()\n fig, ax = plt.subplots(8,1,figsize=(7,10))\n \n for e, f in enumerate(fields):\n ind = t['field']==f\n t_ = t[ind]\n \n plt.sca(ax[e])\n plt.scatter(t_['phi1'], t_['Vrad'], c=t_['fibID'], vmin=0, vmax=255,)\n \n plt.xticks([])\n\n plt.xlabel('$\\phi_1$ [deg]')\n fig.text(0.01, 0.5, '$V_r$ [km s$^{-1}$]', va='center', 
rotation='vertical')\n \n plt.tight_layout(rect=[0.03,0,1,1], h_pad=0)\n plt.savefig('../plots/phi1_vr_field.png')\n\ndef test_v2():\n \"\"\"\"\"\"\n \n t1 = Table.read('../data/GD1_MSG_V1.3.fits')\n t1.sort('objID')\n t2 = Table.read('../data/gd1_MSG_v2.0.fits')\n t2.sort('objID')\n t = Table.read('../data/master_catalog.fits')\n #t.sort('objID')\n #t.pprint()\n #print(t1.colnames)\n \n #t1.pprint()\n #t2.pprint()\n \n plt.close()\n plt.figure()\n \n plt.plot(t1['SNR'], t1['Vrad'] - t2['Vrad'], '.')\n \n plt.xlim(3,10)\n plt.ylim(-5,5)\n \n plt.tight_layout()\n\n\n# single field analysis\ndef field_vr(field=1):\n \"\"\"\"\"\"\n \n t = Table.read('../data/master_catalog.fits')\n ind = (t['priority']<4) & (t['field']==field)\n t = t[ind]\n \n #print(t.colnames)\n \n #print(np.median(t['Vrad']))\n dvr = t['Vrad'] - np.median(t['Vrad'])\n ind = np.abs(dvr)<10\n t = t[ind]\n dvr = dvr[ind]\n #print(t['starname'])\n \n plt.close()\n fig, ax = plt.subplots(1,2,sharey=True)\n \n plt.sca(ax[0])\n plt.scatter(t['phi1'], t['phi2'], c=dvr, vmin=-1, vmax=4, s=40)\n #plt.gca().set_aspect('equal')\n \n plt.sca(ax[1])\n plt.plot(dvr, t['phi2'], 'ko')\n \n \n plt.tight_layout()\n\ndef diagnostic_summary(f=1, color_by='phi1', cmd_mem=True, return_figure=False):\n \"\"\"Summary plot per field\"\"\"\n tin = Table.read('../data/master_catalog.fits')\n\n if cmd_mem:\n mem = get_members(tin)\n else:\n md = get_members(tin, full=True)\n mem = md['vrmem'] & md['fehmem'] & md['pmmem']\n \n ind = tin['field']==f\n print(f, np.sum(ind), len(tin))\n \n tin['gr'] = tin['g'] - tin['r']\n t = tin[ind & mem]\n \n alphas = [0.5,1]\n #pairs = [[['phi1', 'phi2'], ['xfocal', 'yfocal']], [['phi1', 'Vrad'], ['FeH', 'aFe']], [['phi1', 'delta_Vrad'], ['gr', 'g']]]\n #labels = [[['$\\phi_1$ [deg]', '$\\phi_2$ [deg]'], ['$x_{focal}$', '$y_{focal}$']], [['$\\phi_1$ [deg]', '$V_{rad}$ [km s$^{-1}$]'], ['[Fe/H]', '[$\\\\alpha$/Fe]']], [['$\\phi_1$ [deg]', '$\\Delta V_{rad}$ [km s$^{-1}$]'], ['(g - r)$_0$ 
[mag]', 'g$_0$ [mag]']]]\n #xlims = [[[-47,-29], [-300,300]], [[-47,-29], [-2.8,-1.8]], [[-47,-29], [0.1,0.6]]]\n #ylims = [[[-0.5,1.7], [-300,300]], [[-100,50], [-0.1,0.6]], [[-10,10], [21,15.5]]]\n \n pairs = [[['phi1', 'phi2'], ['pm_phi1_cosphi2', 'pm_phi2']], [['phi1', 'Vrad'], ['FeH', 'aFe']], [['phi1', 'delta_Vrad'], ['gr', 'g']]]\n labels = [[['$\\phi_1$ [deg]', '$\\phi_2$ [deg]'], ['$\\mu_{\\phi_1}$ [deg]', '$\\mu_{\\phi_2}$ [deg]']], [['$\\phi_1$ [deg]', '$V_{rad}$ [km s$^{-1}$]'], ['[Fe/H]', '[$\\\\alpha$/Fe]']], [['$\\phi_1$ [deg]', '$\\Delta V_{rad}$ [km s$^{-1}$]'], ['(g - r)$_0$ [mag]', 'g$_0$ [mag]']]]\n xlims = [[[-47,-29], [-9,-4.5]], [[-47,-29], [-2.8,-1.8]], [[-47,-29], [0.1,0.6]]]\n ylims = [[[-0.5,1.7], [-1.7,1]], [[-100,50], [-0.1,0.6]], [[-10,10], [21,15.5]]]\n \n \n plt.close()\n fig, ax = plt.subplots(3,2,figsize=(15,8), gridspec_kw={'width_ratios': (3,1)})\n \n ticks_minor = [0.25, 2, 1]\n ticks_major = [0.5, 10, 5]\n vlim = {'FeH': [-2.75, -2]}\n # cap errorbar\n t['std_Vrad'][t['std_Vrad']>3] = 3\n ind = t['fibID']<=120\n ichips = [ind, ~ind]\n mchips = ['o', 's']\n \n for irow in range(3):\n for icol in range(2):\n plt.sca(ax[irow][icol])\n #for e, t_ in enumerate([tin[mem], t]):\n plt.plot(tin[mem][pairs[irow][icol][0]], tin[mem][pairs[irow][icol][1]], 'ko', alpha=0.2, mec='none', ms=6, zorder=0)\n if color_by=='order':\n plt.scatter(t[pairs[irow][icol][0]], t[pairs[irow][icol][1]], c=np.arange(len(tin[mem])), zorder=1, cmap='plasma', s=50, ec='k')\n else:\n for ii, ind_ in enumerate(ichips):\n plt.scatter(t[pairs[irow][icol][0]][ind_], t[pairs[irow][icol][1]][ind_], c=t[color_by][ind_], zorder=1, cmap='plasma', s=50, ec='k', vmin=vlim[color_by][0], vmax=vlim[color_by][1], marker=mchips[ii])\n \n plt.xlabel(labels[irow][icol][0])\n plt.ylabel(labels[irow][icol][1])\n plt.xlim(xlims[irow][icol])\n plt.ylim(ylims[irow][icol])\n \n # insets\n icol = 0\n plt.sca(ax[irow][icol])\n axins = plt.gca().inset_axes([0.2, 0.1, 0.3, 0.8])\n for ii, 
ind_ in enumerate(ichips):\n axins.scatter(t[pairs[irow][icol][0]][ind_], t[pairs[irow][icol][1]][ind_], c=t[color_by][ind_], zorder=5, cmap='plasma', s=50, ec='k', marker=mchips[ii], vmin=vlim[color_by][0], vmax=vlim[color_by][1])\n if irow>0:\n axins.errorbar(t[pairs[irow][icol][0]], t[pairs[irow][icol][1]], yerr=t['std_Vrad'], fmt='none', color='k', zorder=0)\n \n axins.tick_params(labelleft=False, labelbottom=False)\n axins.yaxis.set_major_locator(mpl.ticker.MultipleLocator(ticks_major[irow]))\n axins.yaxis.set_minor_locator(mpl.ticker.MultipleLocator(ticks_minor[irow]))\n axins.minorticks_on()\n axins.grid(b=True, which='both', axis='both', zorder=0)\n \n irow = 2\n icol = 0\n plt.sca(ax[irow][icol])\n\n plt.text(0.1,0.1,'{:.1f}'.format(np.std(t[pairs[irow][icol][1]])), transform=plt.gca().transAxes)\n #plt.xlim(xlims[irow][icol])\n #plt.ylim(ylims[irow][icol])\n plt.ylim(-5,5)\n \n plt.tight_layout(h_pad=0, w_pad=0)\n \n if return_figure:\n return fig\n\ndef diagnostic_summary_all(cmd_mem=True, color_by='phi1'):\n \"\"\"\"\"\"\n \n pp = PdfPages('../plots/spectroscopic_summary_cmd{:d}_color{:s}.pdf'.format(cmd_mem, color_by))\n \n for f in range(1,9):\n fig = diagnostic_summary(f=f, color_by=color_by, cmd_mem=cmd_mem, return_figure=True)\n pp.savefig(fig)\n \n pp.close()\n\n\ndef vrot_field():\n \"\"\"\"\"\"\n t = Table.read('../data/master_catalog.fits')\n ind = (-t['lnL'] < 2.5E3+t['SNR']**2.4) & (t['SNR']>2)\n t = t[ind]\n print(t.colnames)\n \n plt.close()\n fig, ax = plt.subplots(1,2,figsize=(10,8), gridspec_kw=dict(width_ratios=[4,1]))\n \n plt.sca(ax[0])\n plt.plot(t['field']*240 + t['fibID'], t['Vrot'], 'k.')\n \n plt.ylim(0,6)\n plt.xlabel('Fiber ID + Field x 240')\n plt.ylabel('$V_{rot}$ [km s$^{-1}$]')\n \n plt.sca(ax[1])\n for i in range(8):\n ind = t['field'] == i+1\n plt.hist(t['Vrot'][ind], bins=np.linspace(0, 6, 20), histtype='step', color=mpl.cm.magma(i/8), label='Field {:d}'.format(i+1), orientation='horizontal', 
lw=np.median(t['Vrot'][ind]))\n \n plt.legend()\n plt.axis('off')\n \n plt.tight_layout(w_pad=0)\n\ndef vrot_sigma():\n \"\"\"\"\"\"\n t = Table.read('../data/master_catalog.fits')\n ind = (-t['lnL'] < 2.5E3+t['SNR']**2.4) & (t['SNR']>2)\n t = t[ind]\n mem = get_members(t)\n \n plt.close()\n plt.figure()\n \n for i in range(8):\n ind = t['field']==i+1\n med = np.median(t['Vrot'][ind])\n std = np.std(t['delta_Vrad'][ind & mem])\n plt.plot(med, std, 'ko')\n plt.text(med+0.05, std+0.05, '{:d}'.format(i+1), fontsize='small')\n \n line = np.linspace(0.7, 2.5,10)\n #plt.plot(line, line, 'k:')\n \n plt.xlabel('Field median $V_{rot}$ [km s$^{-1}$]')\n plt.ylabel('Field GD-1 member $\\sigma_{V_{rad}}$ [km s$^{-1}$]')\n plt.xlim(0.7, 2.5)\n plt.ylim(0.7, 2.5)\n plt.gca().xaxis.set_major_locator(mpl.ticker.MultipleLocator(0.2))\n plt.gca().yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.2))\n plt.gca().set_aspect('equal')\n plt.tight_layout()\n plt.savefig('../plots/vrot_sigma.png')\n\ndef afeh_kinematics():\n \"\"\"\"\"\"\n t = Table.read('../data/master_catalog.fits')\n \n md = get_members(t, full=True)\n mem = md['vrmem'] & md['fehmem'] & md['pmmem']\n t = t[mem]\n \n spurfields = [2,4,5,6]\n ind = np.array([t['field']==x for x in spurfields])\n ind_spur = np.sum(ind, axis=0, dtype=bool)\n ind_stream = ~ind_spur\n ms = [10,6]\n alpha = [0.5, 0.8]\n colors = ['k', 'r']\n \n p1 = [-2.6,0.5]\n p2 = [-2, 0]\n p1 = [-2.6,0.5]\n p2 = [-2.1, 0.04]\n \n k = (p2[1] - p1[1])/(p2[0] - p1[0])\n l = p1[1] - k*p1[0]\n ind = t['aFe'] < k*t['FeH'] + l\n\n plt.close()\n fig, ax = plt.subplots(2,1,figsize=(10,8))\n plt.sca(ax[0])\n plt.plot(t['FeH'][~ind], t['aFe'][~ind], 'ko')\n plt.plot(t['FeH'][ind], t['aFe'][ind], 'ro')\n \n plt.sca(ax[1])\n \n for e, ind_field in enumerate([ind_spur, ind_stream]):\n for ec, ind_chem in enumerate([~ind, ind]):\n sel = ind_field & ind_chem\n plt.errorbar(t['phi1'][sel], t['Vrad'][sel], yerr=t['std_Vrad'][sel], fmt='o', color=colors[ec], ms=ms[e], 
alpha=alpha[e])\n #plt.plot(t['phi1'][ind & ind_], t['Vrad'][ind & ind_], 'ro', ms=ms[e], alpha=alpha[e])\n plt.ylim(-100,50)\n #plt.ylim(-10,10)\n\n plt.tight_layout()\n\ndef afeh_clusters():\n \"\"\"\"\"\"\n \n t = Table.read('../data/master_catalog.fits')\n ind = (-t['lnL'] < 2.5E3+t['SNR']**2.4) & (t['SNR']>2)\n t = t[ind]\n mem = get_members(t)\n t = t[mem]\n \n clusters = ['M13', 'M3', 'M107', 'M67']\n clusters = ['M13', 'M107', 'M67']\n\n plt.close()\n plt.figure(figsize=(12,6))\n \n plt.plot(t['FeH'], t['aFe'], 'ko', mew=0)\n plt.errorbar(t['FeH'], t['aFe'], xerr=t['std_FeH'], yerr=t['std_aFe'], fmt='none', color='k', alpha=0.5, lw=0.5)\n plt.text(np.median(t['FeH']), 0.62, 'GD-1', va='bottom', color='k', fontsize='small')\n \n print(np.median(t['FeH']))\n print('GD-1', np.percentile(t['SNR'], [10, 50, 90]))\n print('FeH', np.std(t['FeH']), np.median(t['std_FeH']))\n print('aFe', np.std(t['aFe']), np.median(t['std_aFe']))\n \n for e, cl in enumerate(clusters):\n tc = Table.read('../data/{:s}_members.fits'.format(cl))\n color = mpl.cm.magma((e+1)/4)\n med = np.median(tc['FeH'])\n \n plt.plot(tc['FeH'], tc['aFe'], 'o', color=color, mew=0)\n plt.errorbar(tc['FeH'], tc['aFe'], xerr=tc['std_FeH'], yerr=tc['std_aFe'], fmt='none', color=color, alpha=0.5, lw=0.5)\n \n plt.text(med, 0.62, '{:s}'.format(clusters[e]), va='bottom', color=color, fontsize='small')\n \n print(clusters[e], np.percentile(tc['SNR'], [10, 50, 90]))\n print('FeH', np.std(tc['FeH']), np.std(tc['FeH'])/np.median(tc['std_FeH']))\n print('aFe', np.std(tc['aFe']), np.std(tc['aFe'])/np.median(tc['std_aFe']))\n \n plt.xlabel('[Fe/H]')\n plt.ylabel('[$\\\\alpha$/Fe]')\n plt.xlim(-3, 0.5)\n plt.ylim(-0.2, 0.6)\n \n plt.gca().set_aspect('equal')\n plt.tight_layout()\n plt.savefig('../plots/afeh_clusters.png')\n\n" ]
[ [ "matplotlib.cm.Oranges", "matplotlib.pyplot.legend", "numpy.polyfit", "numpy.poly1d", "numpy.savez", "matplotlib.ticker.MultipleLocator", "numpy.linspace", "numpy.sqrt", "matplotlib.pyplot.minorticks_on", "matplotlib.pyplot.plot", "numpy.concatenate", "numpy.max", "numpy.zeros_like", "numpy.random.randn", "matplotlib.patches.Polygon", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "numpy.unique", "numpy.save", "matplotlib.cm.magma", "numpy.ceil", "numpy.std", "numpy.size", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "matplotlib.gridspec.GridSpec", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.min", "matplotlib.pyplot.ylim", "numpy.median", "matplotlib.path.Path", "matplotlib.pyplot.savefig", "numpy.int64", "matplotlib.cm.Blues", "numpy.array", "matplotlib.pyplot.hist", "numpy.sum", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.axhline", "matplotlib.pyplot.axvline", "numpy.abs", "matplotlib.pyplot.scatter", "numpy.random.seed", "numpy.isfinite", "matplotlib.pyplot.subplots", "matplotlib.pyplot.sca", "numpy.percentile", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlim", "numpy.shape", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
carlosal1015/active_subspaces
[ "caaf108fcb89548a374fea7704b0d92d38b4539a", "caaf108fcb89548a374fea7704b0d92d38b4539a" ]
[ "tutorials/test_functions/piston/piston.py", "tutorials/test_functions/robot/robot.py" ]
[ "##########################################################################\n#\n# PISTON SIMULATION FUNCTION\n#\n# Authors: Sonja Surjanovic, Simon Fraser University\n# Derek Bingham, Simon Fraser University\n# Questions/Comments: Please email Derek Bingham at [email protected].\n#\n# Copyright 2013. Derek Bingham, Simon Fraser University.\n#\n# THERE IS NO WARRANTY, EXPRESS OR IMPLIED. WE DO NOT ASSUME ANY LIABILITY\n# FOR THE USE OF THIS SOFTWARE. If software is modified to produce\n# derivative works, such modified software should be clearly marked.\n# Additionally, this program is free software; you can redistribute it \n# and/or modify it under the terms of the GNU General Public License as \n# published by the Free Software Foundation; version 2.0 of the License. \n# Accordingly, this program is distributed in the hope that it will be \n# useful, but WITHOUT ANY WARRANTY; without even the implied warranty \n# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU \n# General Public License for more details.\n#\n# For function details and reference information, see:\n# http://www.sfu.ca/~ssurjano/\n#\n##########################################################################\n#\n# OUTPUT AND INPUT:\n#\n# C = cycle time\n# xx = [M, S, V0, k, P0, Ta, T0]\n#\n#########################################################################\n#M = [30,60];\n#S = [0.005,0.020];\n#v0 = [0.002,0.010];\n#k = [1000, 5000];\n#p0 = [90000, 111000];\n#Ta = [290,296];\n#T0 = [340,360];\nimport numpy as np \ndef fun(xx):\n pi = np.pi\n # Scaling input from [-1,1] hypercube to the input parameter ranges\n M = (xx[0]+1)*0.5*(60-30)+30;\n S = (xx[1]+1)*0.5*(0.02-0.005)+0.005;\n V0 = (xx[2]+1)*0.5*(0.010-0.002)+0.002;\n k = (xx[3]+1)*0.5*(5000-1000)+1000;\n P0 = (xx[4]+1)*0.5*(110000-90000)+90000;\n Ta = (xx[5]+1)*0.5*(296-290)+290;\n T0 = (xx[6]+1)*0.5*(360-340)+340;\n \n Aterm1 = P0 * S;\n Aterm2 = 19.62 * M;\n Aterm3 = -k*V0 / S;\n A = Aterm1 + Aterm2 + Aterm3;\n \n 
Vfact1 = S / (2*k);\n Vfact2 = np.sqrt(A**2 + 4*k*(P0*V0/T0)*Ta);\n V = Vfact1 * (Vfact2 - A);\n \n fact1 = M;\n fact2 = k + (S**2)*(P0*V0/T0)*(Ta/(V**2));\n \n C = 2 * pi * np.sqrt(fact1/fact2);\n\n out = np.array([0.314159E1*(M*(k+4*k**2*P0*T0**(-1)*Ta*V0*((-0.1962E2)*M+ \\\n (-1)*P0*S+k*S**(-1)*V0+(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2* \\\n M+P0*S+(-1)*k*S**(-1)*V0)**2)**0.5E0)**(-2))**(-1))**(-0.5E0) \\\n *(8*k**2*M*P0*T0**(-1)*Ta*V0*((-0.1962E2)+0.1962E2*( \\\n 0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0)*(4*k*P0*T0**(-1)*Ta* \\\n V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0)**2)**(-0.5E0))*(( \\\n -0.1962E2)*M+(-1)*P0*S+k*S**(-1)*V0+(4*k*P0*T0**(-1)*Ta* \\\n V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0)**2)**0.5E0)**(-3)*(k+ \\\n 4*k**2*P0*T0**(-1)*Ta*V0*((-0.1962E2)*M+(-1)*P0*S+k*S**( \\\n -1)*V0+(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k* \\\n S**(-1)*V0)**2)**0.5E0)**(-2))**(-2)+(k+4*k**2*P0*T0**(-1)* \\\n Ta*V0*((-0.1962E2)*M+(-1)*P0*S+k*S**(-1)*V0+(4*k*P0* \\\n T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0)**2) \\\n **0.5E0)**(-2))**(-1)),\n 0.251327E2*k**2*M*P0*T0**(-1)*Ta*V0*((-1)*P0+(-1)*k*S**( \\\n -2)*V0+0.1E1*(P0+k*S**(-2)*V0)*(0.1962E2*M+P0*S+(-1)*k* \\\n S**(-1)*V0)*(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1) \\\n *k*S**(-1)*V0)**2)**(-0.5E0))*((-0.1962E2)*M+(-1)*P0*S+k* \\\n S**(-1)*V0+(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)* \\\n k*S**(-1)*V0)**2)**0.5E0)**(-3)*(M*(k+4*k**2*P0*T0**(-1)* \\\n Ta*V0*((-0.1962E2)*M+(-1)*P0*S+k*S**(-1)*V0+(4*k*P0* \\\n T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0)**2) \\\n **0.5E0)**(-2))**(-1))**(-0.5E0)*(k+4*k**2*P0*T0**(-1)*Ta* \\\n V0*((-0.1962E2)*M+(-1)*P0*S+k*S**(-1)*V0+(4*k*P0*T0**(-1) \\\n *Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0)**2)**0.5E0)**( \\\n -2))**(-2),\n (-0.125664E2)*k*P0*Ta*((-0.1962E2)*M+(-1)*P0*S+k*S**(-1)* \\\n V0+(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1) \\\n *V0)**2)**0.5E0)**(-3)*((-0.1962E2)*M+(-1)*P0*S+k*S**(-1)* \\\n V0+(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1) \\\n 
*V0)**2)**0.5E0+(-2)*k*S**(-2)*V0*(S+T0**(-1)*((-0.1962E2)* \\\n M*S*T0+P0*S**2*((-0.1E1)*T0+0.2E1*Ta)+0.1E1*k*T0*V0)*( \\\n 4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)* \\\n V0)**2)**(-0.5E0)))*(T0+4*k*P0*Ta*V0*((-0.1962E2)*M+(-1)* \\\n P0*S+k*S**(-1)*V0+(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0* \\\n S+(-1)*k*S**(-1)*V0)**2)**0.5E0)**(-2))**(-1)*(M*(k+4*k**2* \\\n P0*T0**(-1)*Ta*V0*((-0.1962E2)*M+(-1)*P0*S+k*S**(-1)*V0+( \\\n 4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)* \\\n V0)**2)**0.5E0)**(-2))**(-1))**0.5E0,\n (-0.314159E1)*M**(-1)*(1+(-8)*k**2*P0*S**(-2)*T0**(-1)*Ta* \\\n V0**2*(S+T0**(-1)*((-0.1962E2)*M*S*T0+P0*S**2*((-0.1E1)* \\\n T0+0.2E1*Ta)+0.1E1*k*T0*V0)*(4*k*P0*T0**(-1)*Ta*V0+( \\\n 0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0)**2)**(-0.5E0))*(( \\\n -0.1962E2)*M+(-1)*P0*S+k*S**(-1)*V0+(4*k*P0*T0**(-1)*Ta* \\\n V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0)**2)**0.5E0)**(-3)+8* \\\n k*P0*T0**(-1)*Ta*V0*((-0.1962E2)*M+(-1)*P0*S+k*S**(-1)* \\\n V0+(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1) \\\n *V0)**2)**0.5E0)**(-2))*(M*(k+4*k**2*P0*T0**(-1)*Ta*V0*(( \\\n -0.1962E2)*M+(-1)*P0*S+k*S**(-1)*V0+(4*k*P0*T0**(-1)*Ta* \\\n V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0)**2)**0.5E0)**(-2))**( \\\n -1))**0.15E1,\n (-0.314159E1)*M*((-8)*k**2*P0*T0**(-1)*Ta*V0*((-1)*S+ \\\n 0.5E0*(4*k*T0**(-1)*Ta*V0+2*S*(0.1962E2*M+P0*S+(-1)*k* \\\n S**(-1)*V0))*(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1) \\\n *k*S**(-1)*V0)**2)**(-0.5E0))*((-0.1962E2)*M+(-1)*P0*S+k* \\\n S**(-1)*V0+(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)* \\\n k*S**(-1)*V0)**2)**0.5E0)**(-3)+4*k**2*T0**(-1)*Ta*V0*(( \\\n -0.1962E2)*M+(-1)*P0*S+k*S**(-1)*V0+(4*k*P0*T0**(-1)*Ta* \\\n V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0)**2)**0.5E0)**(-2))*( \\\n M*(k+4*k**2*P0*T0**(-1)*Ta*V0*((-0.1962E2)*M+(-1)*P0*S+ \\\n k*S**(-1)*V0+(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1) \\\n *k*S**(-1)*V0)**2)**0.5E0)**(-2))**(-1))**(-0.5E0)*(k+4* \\\n k**2*P0*T0**(-1)*Ta*V0*((-0.1962E2)*M+(-1)*P0*S+k*S**(-1) \\\n 
*V0+(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**( \\\n -1)*V0)**2)**0.5E0)**(-2))**(-2),\n (-0.314159E1)*M*((-0.16E2)*k**3*P0**2*T0**(-2)*Ta*V0**2*( \\\n 4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)* \\\n V0)**2)**(-0.5E0)*((-0.1962E2)*M+(-1)*P0*S+k*S**(-1)*V0+(4* \\\n k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0) \\\n **2)**0.5E0)**(-3)+4*k**2*P0*T0**(-1)*V0*((-0.1962E2)*M+(-1) \\\n *P0*S+k*S**(-1)*V0+(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+ \\\n P0*S+(-1)*k*S**(-1)*V0)**2)**0.5E0)**(-2))*(M*(k+4*k**2* \\\n P0*T0**(-1)*Ta*V0*((-0.1962E2)*M+(-1)*P0*S+k*S**(-1)*V0+( \\\n 4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)* \\\n V0)**2)**0.5E0)**(-2))**(-1))**(-0.5E0)*(k+4*k**2*P0*T0**(-1) \\\n *Ta*V0*((-0.1962E2)*M+(-1)*P0*S+k*S**(-1)*V0+(4*k*P0* \\\n T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0)**2) \\\n **0.5E0)**(-2))**(-2),\n (-0.314159E1)*M*(0.16E2*k**3*P0**2*T0**(-3)*Ta**2*V0**2*( \\\n 4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)* \\\n V0)**2)**(-0.5E0)*((-0.1962E2)*M+(-1)*P0*S+k*S**(-1)*V0+(4* \\\n k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0) \\\n **2)**0.5E0)**(-3)+(-4)*k**2*P0*T0**(-2)*Ta*V0*((-0.1962E2) \\\n *M+(-1)*P0*S+k*S**(-1)*V0+(4*k*P0*T0**(-1)*Ta*V0+( \\\n 0.1962E2*M+P0*S+(-1)*k*S**(-1)*V0)**2)**0.5E0)**(-2))*(M*( \\\n k+4*k**2*P0*T0**(-1)*Ta*V0*((-0.1962E2)*M+(-1)*P0*S+k* \\\n S**(-1)*V0+(4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)* \\\n k*S**(-1)*V0)**2)**0.5E0)**(-2))**(-1))**(-0.5E0)*(k+4*k**2* \\\n P0*T0**(-1)*Ta*V0*((-0.1962E2)*M+(-1)*P0*S+k*S**(-1)*V0+( \\\n 4*k*P0*T0**(-1)*Ta*V0+(0.1962E2*M+P0*S+(-1)*k*S**(-1)* \\\n V0)**2)**0.5E0)**(-2))**(-2)]) \n scaling = np.array([30,0.02-.005,0.01-0.002,4000,20000,6,20])*0.5\n dC = out*scaling\n return [C,dC]\n", "from __future__ import division\nimport numpy as np\n\ndef fun(xx):\n##########################################################################\n#\n# ROBOT ARM FUNCTION\n#\n# Author: Paul Diaz, Colorado School of Mines \n# Questions/Comments: 
Please email Paul Diaz at [email protected]\n#\n# Copyright 2016, Paul Diaz, Colorado School of Mines \n#\n# THERE IS NO WARRANTY, EXPRESS OR IMPLIED. WE DO NOT ASSUME ANY LIABILITY\n# FOR THE USE OF THIS SOFTWARE. If software is modified to produce\n# derivative works, such modified software should be clearly marked.\n# Additionally, this program is free software; you can redistribute it \n# and/or modify it under the terms of the GNU General Public License as \n# published by the Free Software Foundation; version 2.0 of the License. \n# Accordingly, this program is distributed in the hope that it will be \n# useful, but WITHOUT ANY WARRANTY; without even the implied warranty \n# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU \n# General Public License for more details.\n#\n# For function details and reference information, see:\n# http://www.sfu.ca/~ssurjano/\n#\n##########################################################################\n#\n# OUTPUT AND INPUTS:\n#\n# y = distance from the end of the arm to the origin\n# xx = [theta1, theta2, theta3, theta4, L1, L2, L3, L4]\n#\n#########################################################################\n# Shift and scale inputs from [-1,1] hypercube to describe ranges\n pi = np.pi\n b = pi/2\n a = -pi/2\n theta = (xx[0:4]+1)*(b-a)*0.5+a\n L = (xx[4:8]+1)*0.5+a\n L1 = L[0]\n L2 = L[1]\n L3 = L[2]\n L4 = L[3]\n T1 = theta[0]\n T2 = theta[1]\n T3 = theta[2]\n T4 = theta[3]\n \n u = L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos(T1+T2+T3+T4)\n v = L1*np.sin(T1)+L2*np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4)\n f = (u**2 + v**2)**(0.5);\n \n out = np.array([ \n #(1/2)*(2*(L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos( \\\n #T1+T2+T3+T4))*((-1)*L1*np.sin(T1)+(-1)*L2*np.sin(T1+T2)+(-1)*L3* \\\n #np.sin(T1+T2+T3)+(-1)*L4*np.sin(T1+T2+T3+T4))+2*(L1*np.cos(T1)+L2*np.cos( \\\n #T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos(T1+T2+T3+T4))*(L1*np.sin(T1)+L2* \\\n 
#np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4)))*((L1*np.cos(T1) \\\n #+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos(T1+T2+T3+T4))**2+(L1* \\\n #np.sin(T1)+L2*np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4))**2) \\\n #**(-1/2),\n 1e-12,\n (1/2)*(2*(L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos( \\\n T1+T2+T3+T4))*((-1)*L2*np.sin(T1+T2)+(-1)*L3*np.sin(T1+T2+T3)+(-1) \\\n *L4*np.sin(T1+T2+T3+T4))+2*(L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4* \\\n np.cos(T1+T2+T3+T4))*(L1*np.sin(T1)+L2*np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+ \\\n L4*np.sin(T1+T2+T3+T4)))*((L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+ \\\n T2+T3)+L4*np.cos(T1+T2+T3+T4))**2+(L1*np.sin(T1)+L2*np.sin(T1+T2)+L3* \\\n np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4))**2)**(-1/2),\n (1/2)*(2*(L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos( \\\n T1+T2+T3+T4))*((-1)*L3*np.sin(T1+T2+T3)+(-1)*L4*np.sin(T1+T2+T3+T4) \\\n )+2*(L3*np.cos(T1+T2+T3)+L4*np.cos(T1+T2+T3+T4))*(L1*np.sin(T1)+L2* \\\n np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4)))*((L1*np.cos(T1) \\\n +L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos(T1+T2+T3+T4))**2+(L1* \\\n np.sin(T1)+L2*np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4))**2) \\\n **(-1/2),\n (1/2)*((-2)*L4*(L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+ \\\n L4*np.cos(T1+T2+T3+T4))*np.sin(T1+T2+T3+T4)+2*L4*np.cos(T1+T2+T3+T4)*( \\\n L1*np.sin(T1)+L2*np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4)) \\\n )*((L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos(T1+T2+ \\\n T3+T4))**2+(L1*np.sin(T1)+L2*np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin( \\\n T1+T2+T3+T4))**2)**(-1/2),\n (1/2)*(2*np.cos(T1)*(L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+ \\\n L4*np.cos(T1+T2+T3+T4))+2*np.sin(T1)*(L1*np.sin(T1)+L2*np.sin(T1+T2)+L3* \\\n np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4)))*((L1*np.cos(T1)+L2*np.cos(T1+T2) \\\n +L3*np.cos(T1+T2+T3)+L4*np.cos(T1+T2+T3+T4))**2+(L1*np.sin(T1)+L2*np.sin( \\\n 
T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4))**2)**(-1/2),\n (1/2)*(2*np.cos(T1+T2)*(L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+ \\\n T3)+L4*np.cos(T1+T2+T3+T4))+2*np.sin(T1+T2)*(L1*np.sin(T1)+L2*np.sin(T1+ \\\n T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4)))*((L1*np.cos(T1)+L2* \\\n np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos(T1+T2+T3+T4))**2+(L1*np.sin(T1) \\\n +L2*np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4))**2)**( \\\n -1/2),\n (1/2)*(2*np.cos(T1+T2+T3)*(L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+ \\\n T2+T3)+L4*np.cos(T1+T2+T3+T4))+2*np.sin(T1+T2+T3)*(L1*np.sin(T1)+L2* \\\n np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4)))*((L1*np.cos(T1) \\\n +L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos(T1+T2+T3+T4))**2+(L1* \\\n np.sin(T1)+L2*np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4))**2) \\\n **(-1/2),\n (1/2)*(2*np.cos(T1+T2+T3+T4)*(L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos( \\\n T1+T2+T3)+L4*np.cos(T1+T2+T3+T4))+2*np.sin(T1+T2+T3+T4)*(L1*np.sin(T1)+ \\\n L2*np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4)))*((L1* \\\n np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos(T1+T2+T3+T4))**2+ \\\n (L1*np.sin(T1)+L2*np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4) \\\n )**2)**(-1/2)])\n scaling = np.array([(b-a),(b-a),(b-a),(b-a),1,1,1,1])*0.5\n df = out*scaling\n return [f,df]\n " ]
[ [ "numpy.array", "numpy.sqrt" ], [ "numpy.array", "numpy.cos", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
martanto/pyts
[ "1c0b0c9628068afaa57e036bd157fcb4ecdddee6" ]
[ "pyts/preprocessing/discretizer.py" ]
[ "\"\"\"Code for discretizers.\"\"\"\n\nimport numpy as np\nfrom numba import njit, prange\nfrom scipy.stats import norm\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.utils.validation import check_array\nfrom warnings import warn\n\n\n@njit()\ndef _uniform_bins(sample_min, sample_max, n_samples, n_bins):\n bin_edges = np.empty((n_bins - 1, n_samples))\n for i in prange(n_samples):\n bin_edges[:, i] = np.linspace(\n sample_min[i], sample_max[i], n_bins + 1)[1:-1]\n return bin_edges\n\n\n@njit()\ndef _digitize_1d(X, bins, n_samples, n_timestamps):\n X_digit = np.empty((n_samples, n_timestamps))\n for i in prange(n_samples):\n X_digit[i] = np.digitize(X[i], bins, right=True)\n return X_digit\n\n\n@njit()\ndef _digitize_2d(X, bins, n_samples, n_timestamps):\n X_digit = np.empty((n_samples, n_timestamps))\n for i in prange(n_samples):\n X_digit[i] = np.digitize(X[i], bins[i], right=True)\n return X_digit\n\n\ndef _digitize(X, bins):\n n_samples, n_timestamps = X.shape\n if isinstance(bins, tuple):\n X_binned = _digitize_2d(X, bins, n_samples, n_timestamps)\n else:\n if bins.ndim == 1:\n X_binned = _digitize_1d(X, bins, n_samples, n_timestamps)\n else:\n X_binned = _digitize_2d(X, bins, n_samples, n_timestamps)\n return X_binned.astype('int64')\n\n\nclass KBinsDiscretizer(BaseEstimator, TransformerMixin):\n \"\"\"Bin continuous data into intervals sample-wise.\n\n Parameters\n ----------\n n_bins : int (default = 5)\n The number of bins to produce. The intervals for the bins are\n determined by the minimum and maximum of the input data. 
It must\n be greater than or equal to 2.\n\n strategy : 'uniform', 'quantile' or 'normal' (default = 'quantile')\n Strategy used to define the widths of the bins:\n\n - 'uniform': All bins in each sample have identical widths\n - 'quantile': All bins in each sample have the same number of points\n - 'normal': Bin edges are quantiles from a standard normal distribution\n\n Examples\n --------\n >>> from pyts.preprocessing import KBinsDiscretizer\n >>> X = [[0, 1, 0, 2, 3, 3, 2, 1],\n ... [7, 0, 6, 1, 5, 3, 4, 2]]\n >>> discretizer = KBinsDiscretizer(n_bins=2)\n >>> print(discretizer.transform(X))\n [[0 0 0 1 1 1 1 0]\n [1 0 1 0 1 0 1 0]]\n\n \"\"\"\n\n def __init__(self, n_bins=5, strategy='quantile'):\n self.n_bins = n_bins\n self.strategy = strategy\n\n def fit(self, X=None, y=None):\n \"\"\"Pass.\n\n Parameters\n ----------\n X\n Ignored\n\n y\n Ignored\n\n Returns\n -------\n self : object\n\n \"\"\"\n return self\n\n def transform(self, X):\n \"\"\"Bin the data.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_timestamps)\n Data to transform.\n\n Returns\n -------\n X_new : array-like, shape = (n_samples, n_timestamps)\n Binned data.\n\n \"\"\"\n X = check_array(X, dtype='float64')\n n_samples, n_timestamps = X.shape\n self._check_params(n_timestamps)\n self._check_constant(X)\n\n bin_edges = self._compute_bins(\n X, n_samples, self.n_bins, self.strategy)\n X_new = _digitize(X, bin_edges)\n return X_new\n\n def _check_params(self, n_timestamps):\n if not isinstance(self.n_bins, (int, np.integer)):\n raise TypeError(\"'n_bins' must be an integer.\")\n if not 2 <= self.n_bins <= n_timestamps:\n raise ValueError(\n \"'n_bins' must be greater than or equal to 2 and lower than \"\n \"or equal to n_timestamps (got {0}).\".format(self.n_bins)\n )\n if self.strategy not in ['uniform', 'quantile', 'normal']:\n raise ValueError(\"'strategy' must be either 'uniform', 'quantile' \"\n \"or 'normal' (got {0}).\".format(self.strategy))\n\n def 
_check_constant(self, X):\n if np.any(np.max(X, axis=1) - np.min(X, axis=1) == 0):\n raise ValueError(\"At least one sample is constant.\")\n\n def _compute_bins(self, X, n_samples, n_bins, strategy):\n if strategy == 'normal':\n bins_edges = norm.ppf(np.linspace(0, 1, self.n_bins + 1)[1:-1])\n elif strategy == 'uniform':\n sample_min, sample_max = np.min(X, axis=1), np.max(X, axis=1)\n bins_edges = _uniform_bins(\n sample_min, sample_max, n_samples, n_bins).T\n else:\n bins_edges = np.percentile(\n X, np.linspace(0, 100, self.n_bins + 1)[1:-1], axis=1\n )\n mask = np.r_[\n ~np.isclose(0, np.diff(bins_edges, axis=0), rtol=0, atol=1e-8),\n np.full((1, n_samples), True)\n ]\n if (self.n_bins > 2) and np.any(~mask):\n samples = np.where(np.any(~mask, axis=0))[0]\n warn(\"Some quantiles are equal. The number of bins will be \"\n \"smaller for sample {0}. Consider decreasing the number \"\n \"of bins or removing these samples.\".format(samples))\n bins_edges = np.asarray([bins_edges[:, i][mask[:, i]]\n for i in range(n_samples)])\n if bins_edges.ndim == 1:\n bins_edges = tuple(bins_edges)\n return bins_edges\n" ]
[ [ "sklearn.utils.validation.check_array", "numpy.linspace", "numpy.min", "numpy.full", "numpy.max", "numpy.diff", "numpy.any", "numpy.digitize", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
arshadshk/greyatom-python-for-data-science
[ "3383a57abf5e389a83842af23b3add74aba752d8" ]
[ "Numpy-assignment/code.py" ]
[ "# --------------\n# Importing header files\r\nimport numpy as np\r\n\r\n# Path of the file has been stored in variable called 'path'\r\n\r\n\r\ndata = np.genfromtxt(path, delimiter=\",\",skip_header = 1)\r\n\r\nprint(data)\r\n\r\n\r\n\r\n#New record\r\nnew_record=[[50, 9, 4, 1, 0, 0, 40, 0]]\r\n\r\ncensus = np.concatenate((data,new_record),axis=0)\r\n#Code starts here\r\n\n\n\n# --------------\n#Code starts here\r\nage = census[:,0]\r\nmax_age = age.max()\r\nmin_age = age.min()\r\nage_mean = age.mean()\r\nage_std = age.std()\r\n\r\n\r\n\r\n\r\nprint(age,max_age,min_age,age_mean,age_std)\n\n\n# --------------\n#Code starts here\r\n\r\nrace_0 = census[census[:,2]==0]\r\nrace_1 = census[census[:,2]==1]\r\nrace_2 = census[census[:,2]==2]\r\nrace_3 = census[census[:,2]==3]\r\nrace_4 = census[census[:,2]==4]\r\n\r\nlen_0, len_1, len_2, len_3, len_4 = len(race_0), len(race_1), len(race_2), len(race_3), len(race_4)\r\n\r\nminority_race = 3\n\n\n# --------------\n#Code starts here\r\n\r\nsenior_citizens = census[census[:,0]>60]\r\nworking_hours_sum = senior_citizens[:,6]\r\n\r\nworking_hours_sum = working_hours_sum.sum()\r\nprint(working_hours_sum)\r\nsenior_citizens_len =len(senior_citizens)\r\n\r\navg_working_hours = working_hours_sum/senior_citizens_len\r\n\r\nprint(avg_working_hours)\n\n\n# --------------\n#Code starts here\r\n\r\nhigh = census[census[:,1]>10]\r\nlow = census[census[:,1]<=10]\r\n\r\navg_pay_high =high[:,7].mean()\r\n\r\navg_pay_low = low[:,7].mean()\n\n\n" ]
[ [ "numpy.concatenate", "numpy.genfromtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
smithj382/groundmotion-processing
[ "b6c8284dc945deb868e90c6e674b1743a424b4f9" ]
[ "gmprocess/utils/download_utils.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# stdlib imports\nimport os\nimport json\nimport logging\nimport warnings\nimport glob\nimport requests\n\n# third party imports\nfrom libcomcat.search import get_event_by_id\nfrom obspy.geodetics.base import locations2degrees\nfrom obspy.core.utcdatetime import UTCDateTime\nfrom obspy.taup import TauPyModel\nimport matplotlib.pyplot as plt\n\nimport numpy as np\n\n# local imports\nfrom gmprocess.io.global_fetcher import fetch_data\nfrom gmprocess.utils.misc import get_rawdir\n\nTIMEFMT2 = \"%Y-%m-%dT%H:%M:%S.%f\"\n\n\nFLOAT_PATTERN = r\"[-+]?[0-9]*\\.?[0-9]+\"\n\n\ndef download(event, event_dir, config):\n \"\"\"Download waveform data.\n\n Args:\n event (ScalarEvent):\n Object containing basic event hypocenter, origin time, magnitude.\n event_dir (str):\n Path where raw directory should be created (if downloading).\n config (dict):\n Dictionary with gmprocess configuration information.\n\n \"\"\"\n # Make raw directory\n rawdir = get_rawdir(event_dir)\n\n tcollection, terrors = fetch_data(\n event.time.datetime,\n event.latitude,\n event.longitude,\n event.depth_km,\n event.magnitude,\n config=config,\n rawdir=rawdir,\n stream_collection=False,\n )\n # download an event.json file in each event directory,\n # in case user is simply downloading for now\n create_event_file(event, event_dir)\n download_rupture_file(event.id, event_dir)\n\n if len(tcollection):\n logging.debug(\"tcollection.describe_string():\")\n logging.debug(tcollection.describe_string())\n\n # Plot the raw waveforms\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n pngfiles = glob.glob(os.path.join(rawdir, \"*.png\"))\n if not len(pngfiles):\n plot_raw(rawdir, tcollection, event)\n\n\ndef create_event_file(event, event_dir):\n \"\"\"Write event.json file in event_dir.\n\n Args:\n event (ScalarEvent):\n Input event object.\n event_dir (str):\n Directory where event.json should be written.\n \"\"\"\n\n # 
download event.json for event\n eventid = event.origins[-1].resource_id.id\n try:\n tevent = get_event_by_id(eventid)\n req = requests.get(tevent.detail_url)\n data = json.loads(req.text)\n except BaseException:\n # convert time to comcat time\n ctime = (event.time - UTCDateTime(\"1970-01-01T00:00:00.000Z\")) * 1000.0\n data = {\n \"type\": \"Feature\",\n \"properties\": {\n \"mag\": event.magnitude,\n \"time\": ctime,\n },\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [event.longitude, event.latitude, event.depth / 1000.0],\n },\n \"id\": eventid,\n }\n\n # dump the event.json file to the event directory\n eventfile = os.path.join(event_dir, \"event.json\")\n with open(eventfile, \"w\") as f:\n json.dump(data, f)\n\n\ndef plot_raw(rawdir, tcollection, event):\n \"\"\"Make PNG plots of a collection of raw waveforms.\n\n Args:\n rawdir (str):\n Directory where PNG files should be saved.\n tcollection (StreamCollection):\n Sequence of streams.\n event (ScalarEvent):\n Event object.\n\n \"\"\"\n model = TauPyModel(model=\"iasp91\")\n source_depth = event.depth_km\n if source_depth < 0:\n source_depth = 0\n eqlat = event.latitude\n eqlon = event.longitude\n for stream in tcollection:\n stlat = stream[0].stats.coordinates[\"latitude\"]\n stlon = stream[0].stats.coordinates[\"longitude\"]\n dist = float(locations2degrees(eqlat, eqlon, stlat, stlon))\n try:\n arrivals = model.get_travel_times(\n source_depth_in_km=source_depth,\n distance_in_degree=dist,\n phase_list=[\"P\", \"p\", \"Pn\"],\n )\n arrival = arrivals[0]\n arrival_time = arrival.time\n except BaseException as e:\n fmt = (\n 'Exception \"%s\" generated by get_travel_times() dist=%.3f ' \"depth=%.1f\"\n )\n logging.warning(fmt % (str(e), dist, source_depth))\n arrival_time = 0.0\n ptime = arrival_time + (event.time - stream[0].stats.starttime)\n outfile = os.path.join(rawdir, f\"{stream.get_id()}.png\")\n\n fig, axeslist = plt.subplots(nrows=3, ncols=1, figsize=(12, 6))\n for ax, trace in 
zip(axeslist, stream):\n times = np.linspace(\n 0.0, trace.stats.endtime - trace.stats.starttime, trace.stats.npts\n )\n ax.plot(times, trace.data, color=\"k\")\n ax.set_xlabel(\"seconds since start of trace\")\n ax.set_title(\"\")\n ax.axvline(ptime, color=\"r\")\n ax.set_xlim(left=0, right=times[-1])\n legstr = \"%s.%s.%s.%s\" % (\n trace.stats.network,\n trace.stats.station,\n trace.stats.location,\n trace.stats.channel,\n )\n ax.legend(labels=[legstr], frameon=True, loc=\"upper left\")\n tbefore = event.time + arrival_time < trace.stats.starttime + 1.0\n tafter = event.time + arrival_time > trace.stats.endtime - 1.0\n if tbefore or tafter:\n legstr = f\"P arrival time {ptime:.1f} seconds\"\n left, right = ax.get_xlim()\n xloc = left + (right - left) / 20\n bottom, top = ax.get_ylim()\n yloc = bottom + (top - bottom) / 10\n ax.text(xloc, yloc, legstr, color=\"r\")\n plt.savefig(outfile, bbox_inches=\"tight\")\n plt.close()\n\n\ndef download_rupture_file(event_id, event_dir):\n \"\"\"Downlaod rupture file from Comcat.\n\n Args:\n event_id (str):\n Event id.\n event_dir (str):\n Event directory.\n \"\"\"\n try:\n event = get_event_by_id(event_id)\n except BaseException:\n logging.info(f\"{event_id} not found in ComCat.\")\n return\n try:\n shakemap_prod = event.getProducts(\"shakemap\")\n shakemap_prod[0].getContent(\n \"rupture.json\", os.path.join(event_dir, \"rupture.json\")\n )\n except BaseException:\n logging.info(f\"{event_id} does not have a rupture.json file.\")\n" ]
[ [ "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jxtrbtk/bindex
[ "8feee8ff9db371d9e691e10ea70467446c0412b1" ]
[ "operatorQS.py" ]
[ "import sys\nimport pandas as pd\n\nfrom decimal import Decimal\n\nimport lib\nimport lib.features\n\nimport operatorQN\nSAFETY_K = 1.0\n\ndef makeup_prices(data_price_mid, data_price_std, t_data):\n symbol = t_data[\"pair\"]\n \n ask, bid = 1/3, 1/3 \n try:\n ask, bid = lib.features.optimized_ask_bid(symbol)\n print(\"ask:{:.04f} bid:{:.04f}\".format(ask, bid))\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(\"price advisor error:\", exc_type, exc_tb.tb_lineno, str(e))\n \n ask = SAFETY_K * ask\n bid = SAFETY_K * bid\n \n price_buy = operatorQN.round_by(data_price_mid-data_price_std*bid, t_data[\"tick_size\"])\n price_sell = operatorQN.round_by(data_price_mid+data_price_std*ask, t_data[\"tick_size\"])\n\n # get order book\n symbol = t_data[\"pair\"]\n baj = lib.api.get_rj(\"depth?symbol={}&limit=1000\".format(symbol))\n\n cols = [\"price\", \"quantity\"]\n dfa = pd.DataFrame(baj[\"asks\"], columns=cols, dtype=float)\n dfb = pd.DataFrame(baj[\"bids\"], columns=cols, dtype=float)\n \n # match price in order book\n price_buy = operatorQN.match_price (dfb, price_buy, -1, tick_size=t_data[\"tick_size\"]) \n price_sell = operatorQN.match_price (dfa, price_sell, +1, tick_size=t_data[\"tick_size\"]) \n \n return price_buy, price_sell \n\ndef main():\n operatorQN.makeup_prices = makeup_prices\n operatorQN.main()\n \nif __name__ == \"__main__\":\n main()" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
lukeandshuo/IR2VI_journal
[ "faf46b41e25e4bf9521466f76c5d688d198da9ee" ]
[ "models/networks.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport functools\nfrom torch.autograd import Variable\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom util.visualizer import util\nimport cv2\n###############################################################################\n# Functions\n###############################################################################\n\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n # print(classname)\n if classname.find('Conv') != -1:\n init.normal(m.weight.data, 0.0, 0.02)\n elif classname.find('Linear') != -1:\n init.normal(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm2d') != -1:\n init.normal(m.weight.data, 1.0, 0.02)\n init.constant(m.bias.data, 0.0)\n\n\ndef weights_init_xavier(m):\n classname = m.__class__.__name__\n # print(classname)\n if classname.find('Conv') != -1:\n init.xavier_normal(m.weight.data, gain=0.02)\n elif classname.find('Linear') != -1:\n init.xavier_normal(m.weight.data, gain=0.02)\n elif classname.find('BatchNorm2d') != -1:\n init.normal(m.weight.data, 1.0, 0.02)\n init.constant(m.bias.data, 0.0)\n\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n # print(classname)\n if classname.find('Conv') != -1:\n init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm2d') != -1:\n init.normal(m.weight.data, 1.0, 0.02)\n init.constant(m.bias.data, 0.0)\n\n\ndef weights_init_orthogonal(m):\n classname = m.__class__.__name__\n print(classname)\n if classname.find('Conv') != -1:\n init.orthogonal(m.weight.data, gain=1)\n elif classname.find('Linear') != -1:\n init.orthogonal(m.weight.data, gain=1)\n elif classname.find('BatchNorm2d') != -1:\n init.normal(m.weight.data, 1.0, 0.02)\n init.constant(m.bias.data, 0.0)\n\n\ndef init_weights(net, init_type='normal'):\n 
print('initialization method [%s]' % init_type)\n if init_type == 'normal':\n net.apply(weights_init_normal)\n elif init_type == 'xavier':\n net.apply(weights_init_xavier)\n elif init_type == 'kaiming':\n net.apply(weights_init_kaiming)\n elif init_type == 'orthogonal':\n net.apply(weights_init_orthogonal)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n\n\ndef get_norm_layer(norm_type='instance'):\n if norm_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True)\n elif norm_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)\n elif norm_type == 'none':\n norm_layer = None\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n return norm_layer\n\n\ndef get_scheduler(optimizer, opt):\n if opt.lr_policy == 'lambda':\n def lambda_rule(epoch):\n lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)\n return lr_l\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)\n elif opt.lr_policy == 'step':\n scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)\n elif opt.lr_policy == 'plateau':\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)\n else:\n return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)\n return scheduler\n\n\ndef define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal', gpu_ids=[]):\n netG = None\n use_gpu = len(gpu_ids) > 0\n norm_layer = get_norm_layer(norm_type=norm)\n\n if use_gpu:\n assert(torch.cuda.is_available())\n\n if which_model_netG == 'resnet_9blocks':\n netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids)\n elif which_model_netG == 'resnet_6blocks':\n netG = ResnetGenerator(input_nc, output_nc, ngf, 
norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6, gpu_ids=gpu_ids)\n elif which_model_netG == 'unet_128':\n netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)\n elif which_model_netG == 'unet_256':\n netG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)\n else:\n raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)\n if len(gpu_ids) > 0:\n netG.cuda(gpu_ids[0])\n init_weights(netG, init_type=init_type)\n return netG\n\n\ndef define_D(input_nc, ndf, which_model_netD,\n n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=[]):\n netD = None\n use_gpu = len(gpu_ids) > 0\n norm_layer = get_norm_layer(norm_type=norm)\n\n if use_gpu:\n assert(torch.cuda.is_available())\n if which_model_netD == 'basic':\n netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)\n elif which_model_netD == 'n_layers':\n netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)\n elif which_model_netD == 'pixel':\n netD = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)\n elif which_model_netD == 'object':\n netD = ObjectDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)\n else:\n raise NotImplementedError('Discriminator model name [%s] is not recognized' %\n which_model_netD)\n if use_gpu:\n netD.cuda(gpu_ids[0])\n init_weights(netD, init_type=init_type)\n return netD\n\n\n\ndef print_network(net):\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print(net)\n print('Total number of parameters: %d' % num_params)\n\n\n##############################################################################\n# 
Classes\n##############################################################################\n\n\n# Defines the GAN loss which uses either LSGAN or the regular GAN.\n# When LSGAN is used, it is basically same as MSELoss,\n# but it abstracts away the need to create the target label tensor\n# that has the same size as the input\nclass GANLoss(nn.Module):\n def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,\n tensor=torch.FloatTensor):\n super(GANLoss, self).__init__()\n self.real_label = target_real_label\n self.fake_label = target_fake_label\n self.real_label_var = None\n self.fake_label_var = None\n self.Tensor = tensor\n if use_lsgan:\n self.loss = nn.MSELoss()\n else:\n self.loss = nn.BCELoss()\n\n def get_target_tensor(self, input, target_is_real):\n target_tensor = None\n if target_is_real:\n create_label = ((self.real_label_var is None) or\n (self.real_label_var.numel() != input.numel()))\n if create_label:\n real_tensor = self.Tensor(input.size()).fill_(self.real_label)\n self.real_label_var = Variable(real_tensor, requires_grad=False)\n target_tensor = self.real_label_var\n else:\n create_label = ((self.fake_label_var is None) or\n (self.fake_label_var.numel() != input.numel()))\n if create_label:\n fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)\n self.fake_label_var = Variable(fake_tensor, requires_grad=False)\n target_tensor = self.fake_label_var\n return target_tensor\n\n def __call__(self, input, target_is_real):\n target_tensor = self.get_target_tensor(input, target_is_real)\n return self.loss(input, target_tensor)\n\n\n# Defines the generator that consists of Resnet blocks between a few\n# downsampling/upsampling operations.\n# Code and idea originally from Justin Johnson's architecture.\n# https://github.com/jcjohnson/fast-neural-style/\nclass ResnetGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], 
padding_type='reflect'):\n assert(n_blocks >= 0)\n super(ResnetGenerator, self).__init__()\n self.input_nc = input_nc\n self.output_nc = output_nc\n self.ngf = ngf\n self.gpu_ids = gpu_ids\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n skip_connection = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, output_nc, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(output_nc)]\n\n self.skip_connection = nn.Sequential(*skip_connection)\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,\n bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling):\n mult = 2**i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,\n stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2**n_downsampling\n for i in range(n_blocks):\n model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]\n\n for i in range(n_downsampling):\n mult = 2**(n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n ###TODO move tanh to outside\n # model += [nn.Tanh()]\n\n self.model = nn.Sequential(*model)\n self.tanh_modul = nn.Sequential(nn.Tanh())\n def forward(self, input):\n if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):\n texture = nn.parallel.data_parallel(self.model, input, self.gpu_ids)\n else:\n texture= self.model(input)\n\n # texture_img = texture.cpu().data.numpy()[0,0,:,:]\n texture_img= util.tensor2im(self.tanh_modul(texture).data)\n\n # plt.imshow(texture_img)\n\n contour = self.skip_connection(input)\n 
contour_img = util.tensor2im(self.tanh_modul(contour).data)\n # cv2.imwrite(\"/data/Sensiac/SensiacNight/I2I_OD_Night/Imagery/demo(out)/images/ir_texture.png\",texture_img)\n # cv2.imwrite(\"/data/Sensiac/SensiacNight/I2I_OD_Night/Imagery/demo(out)/images/ir_contour.png\", contour_img)\n # cv2.imshow(\"texture\",texture_img)\n # cv2.imshow('contour',contour_img)\n # cv2.waitKey(1)\n # contour_img = contour.cpu().data.numpy()[0,0,:,:]\n # plt.imshow(hf_img)\n # plt.show()\n output = contour+texture\n return self.tanh_modul(output)\n\n\n# Define a resnet block\nclass ResnetBlock(nn.Module):\n def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)\n\n def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),\n norm_layer(dim),\n nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),\n norm_layer(dim)]\n\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n out = x + self.conv_block(x)\n return out\n\n\n# Defines the Unet generator.\n# |num_downs|: number of downsamplings in UNet. 
For example,\n# if |num_downs| == 7, image of size 128x128 will become of size 1x1\n# at the bottleneck\nclass UnetGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, num_downs, ngf=64,\n norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):\n super(UnetGenerator, self).__init__()\n self.gpu_ids = gpu_ids\n\n # construct unet structure\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)\n for i in range(num_downs - 5):\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)\n unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)\n\n self.model = unet_block\n\n def forward(self, input):\n if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):\n return nn.parallel.data_parallel(self.model, input, self.gpu_ids)\n else:\n return self.model(input)\n\n\n# Defines the submodule with skip connection.\n# X -------------------identity---------------------- X\n# |-- downsampling -- |submodule| -- upsampling --|\nclass UnetSkipConnectionBlock(nn.Module):\n def __init__(self, outer_nc, inner_nc, input_nc=None,\n submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UnetSkipConnectionBlock, self).__init__()\n self.outermost = outermost\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n if input_nc is None:\n input_nc = 
outer_nc\n downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if outermost:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n down = [downconv]\n up = [uprelu, upconv, nn.Tanh()]\n model = down + [submodule] + up\n elif innermost:\n upconv = nn.ConvTranspose2d(inner_nc, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv]\n up = [uprelu, upconv, upnorm]\n model = down + up\n else:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upconv, upnorm]\n\n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] + up\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n if self.outermost:\n return self.model(x)\n else:\n return torch.cat([x, self.model(x)], 1)\n\n\n# Defines the PatchGAN discriminator with the specified arguments.\nclass NLayerDiscriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):\n super(NLayerDiscriminator, self).__init__()\n self.gpu_ids = gpu_ids\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n sequence = [\n nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers):\n nf_mult_prev = nf_mult\n nf_mult = min(2**n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = 
nf_mult\n nf_mult = min(2**n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]\n\n if use_sigmoid:\n sequence += [nn.Sigmoid()]\n\n self.model = nn.Sequential(*sequence)\n\n def forward(self, input):\n if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):\n return nn.parallel.data_parallel(self.model, input, self.gpu_ids)\n else:\n return self.model(input)\n\n\nclass ObjectDiscriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):\n super(ObjectDiscriminator, self).__init__()\n self.gpu_ids = gpu_ids\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n sequence = [\n nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, 4):\n nf_mult_prev = nf_mult\n nf_mult = min(2**n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2**4, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=3, stride=1)]\n\n if use_sigmoid:\n sequence += [nn.Sigmoid()]\n\n self.model = nn.Sequential(*sequence)\n\n def forward(self, input):\n if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):\n return nn.parallel.data_parallel(self.model, input, self.gpu_ids)\n else:\n return 
self.model(input)\n\n\nclass PixelDiscriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):\n super(PixelDiscriminator, self).__init__()\n self.gpu_ids = gpu_ids\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n \n self.net = [\n nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),\n norm_layer(ndf * 2),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]\n\n if use_sigmoid:\n self.net.append(nn.Sigmoid())\n\n self.net = nn.Sequential(*self.net)\n\n def forward(self, input):\n if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):\n return nn.parallel.data_parallel(self.net, input, self.gpu_ids)\n else:\n return self.net(input)\n\n" ]
[ [ "torch.optim.lr_scheduler.LambdaLR", "torch.cuda.is_available", "torch.nn.init.xavier_normal", "torch.autograd.Variable", "torch.nn.ReplicationPad2d", "torch.nn.Dropout", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.nn.Sigmoid", "torch.nn.init.constant", "torch.optim.lr_scheduler.StepLR", "torch.nn.init.kaiming_normal", "torch.nn.Sequential", "torch.nn.ConvTranspose2d", "torch.nn.Conv2d", "torch.nn.BCELoss", "torch.nn.LeakyReLU", "torch.nn.init.orthogonal", "torch.nn.ReflectionPad2d", "torch.nn.parallel.data_parallel", "torch.nn.Tanh", "torch.nn.ReLU", "torch.nn.MSELoss", "torch.nn.init.normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ROCmSoftwarePlatform/translate
[ "32a6380d914ebe1a6c38c4992aac9600ed3d9810" ]
[ "pytorch_translate/beam_search_and_decode_v2.py" ]
[ "#!/usr/bin/env python3\n\nfrom typing import List, Tuple\n\nimport numpy as np\nimport torch\nimport torch.jit\nimport torch.jit.quantized\nfrom pytorch_translate.beam_decode import BeamDecode\nfrom pytorch_translate.ensemble_export import (\n DecoderBatchedStepEnsemble,\n EncoderEnsemble,\n FakeCharSourceEncoderEnsemble,\n load_models_from_checkpoints,\n)\nfrom torch import Tensor\n\n\nclass DecoderBatchedStepEnsemble2BeamWithEOS(DecoderBatchedStepEnsemble):\n \"\"\"\n This class inherits DecoderBatchedStepEnsemble class. While keeping the basic\n functionality of running decoding ensemble, two new features are added:\n expanding double beam size at each search step in case half are eos, appending\n extra EOS tokens at the end.\n \"\"\"\n\n # TODO:this class will be merged with upstream after BeamSearchAndDecodeV1 shipped.\n def forward(\n self,\n input_tokens,\n prev_scores,\n active_hypos,\n timestep,\n final_step,\n *inputs,\n src_tuple=None,\n ):\n # input_tokens size: 2 * beam_size -> beam_size,\n # since we only need half of them which are active.\n input_tokens = input_tokens.index_select(dim=0, index=active_hypos).unsqueeze(1)\n prev_scores = prev_scores.index_select(dim=0, index=active_hypos)\n\n eos_token = torch.LongTensor([self.tgt_dict.eos()])\n\n (\n log_probs_per_model,\n attn_weights_per_model,\n state_outputs,\n beam_axis_per_state,\n possible_translation_tokens,\n ) = self._get_decoder_outputs(\n input_tokens, prev_scores, timestep, *inputs, src_tuple=src_tuple\n )\n\n average_log_probs = torch.mean(\n torch.cat(log_probs_per_model, dim=1), dim=1, keepdim=True\n )\n\n if possible_translation_tokens is None:\n word_rewards = self.word_rewards\n else:\n word_rewards = self.word_rewards.index_select(\n 0, possible_translation_tokens\n )\n word_rewards = word_rewards.unsqueeze(dim=0).unsqueeze(dim=0)\n\n average_log_probs_with_rewards = average_log_probs + word_rewards\n\n average_attn_weights = torch.mean(\n torch.cat(attn_weights_per_model, 
dim=1), dim=1, keepdim=True\n )\n\n # need control-flow to see if it's final_step, thus written in script.\n @torch.jit.script\n def generate_outputs(\n final_step: Tensor,\n average_log_probs_with_rewards: Tensor,\n average_attn_weights: Tensor,\n prev_scores: Tensor,\n eos_token: Tensor,\n beam_size: int,\n ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n # expand 2 * beam_size in case half of them are eos tokens\n double_beam_size = 2 * beam_size\n\n if bool(final_step):\n\n # at final step, we just select eos token and its corresponding score\n # as best_tokens and eos_scores.\n cand_tokens = eos_token.repeat(double_beam_size)\n # eos_scores size: (beam_size, 1, 1)\n eos_scores = average_log_probs_with_rewards.index_select(\n dim=2, index=eos_token\n )\n eos_scores_flat = eos_scores.view(-1)\n cand_scores = prev_scores.view(-1) + eos_scores_flat\n # cand_scores size: beam_size -> 2 * beam_size\n cand_scores = cand_scores.repeat(2)\n cand_prev_hypos = torch.arange(0, double_beam_size).type_as(cand_tokens)\n cand_attention_weights = average_attn_weights.squeeze(1).repeat(2, 1)\n # active_hypos size: beam_size\n active_hypos = torch.arange(0, beam_size).type_as(cand_tokens)\n else:\n\n # Here we keep consistent with SequenceGenerator, take 2*beam_size best\n # predictions per step, will select top beam_size of these which don't\n # predict eos to continue with.\n cand_scores_k_by_2k, cand_tokens_k_by_2k = torch.topk(\n average_log_probs_with_rewards.squeeze(1), k=double_beam_size\n )\n\n prev_scores_k_by_2k = prev_scores.view(-1, 1).expand(\n -1, double_beam_size\n )\n # size is (beam_size, 2 * beam_size)\n total_scores_k_by_2k = cand_scores_k_by_2k + prev_scores_k_by_2k\n\n total_scores_flat_2k = total_scores_k_by_2k.view(-1)\n # size is (beam_size * 2 * beam_size)\n cand_tokens_flat_2k = cand_tokens_k_by_2k.view(-1)\n # size is (2 * beam_size)\n cand_scores, cand_indices = torch.topk(\n total_scores_flat_2k, k=double_beam_size\n )\n # size is (2 * 
beam_size)\n cand_tokens = cand_tokens_flat_2k.index_select(\n dim=0, index=cand_indices\n ).view(-1)\n\n # size is (2 * beam_size)\n eos_mask = cand_tokens.eq(eos_token[0])\n cand_prev_hypos = cand_indices // double_beam_size\n cand_prev_hypos = cand_prev_hypos.type_as(cand_tokens)\n\n cand_offsets = torch.arange(0, double_beam_size)\n\n active_mask = torch.add(\n eos_mask.type_as(cand_offsets) * double_beam_size, cand_offsets\n )\n # select active hypos, size is (beam_size)\n _, active_hypos = torch.topk(\n active_mask, k=beam_size, dim=0, largest=False, sorted=True\n )\n\n cand_attention_weights = average_attn_weights.index_select(\n dim=0, index=cand_prev_hypos\n ).squeeze(1)\n\n return (\n cand_tokens,\n cand_scores,\n cand_prev_hypos,\n cand_attention_weights,\n active_hypos,\n )\n\n (\n cand_tokens,\n cand_scores,\n cand_prev_hypos,\n cand_attention_weights,\n active_hypos,\n ) = generate_outputs(\n final_step,\n average_log_probs_with_rewards,\n average_attn_weights,\n prev_scores,\n eos_token=eos_token,\n beam_size=self.beam_size,\n )\n\n # select active prev_hypos\n active_prev_hypos = cand_prev_hypos.index_select(dim=0, index=active_hypos)\n if possible_translation_tokens is not None:\n cand_tokens = possible_translation_tokens.index_select(\n dim=0, index=cand_tokens\n )\n\n self.input_names = [\"prev_tokens\", \"prev_scores\", \"active_hypos\", \"timestep\"]\n for i in range(len(self.models)):\n self.input_names.append(f\"fixed_input_{i}\")\n\n if possible_translation_tokens is not None:\n self.input_names.append(\"possible_translation_tokens\")\n\n active_outputs = [\n cand_tokens,\n cand_scores,\n cand_prev_hypos,\n cand_attention_weights,\n active_hypos,\n ]\n self.output_names = [\n \"cand_tokens\",\n \"cand_scores\",\n \"cand_prev_hypos\",\n \"cand_attention_weights\",\n \"active_hypos\",\n ]\n for i in range(len(self.models)):\n self.output_names.append(f\"fixed_input_{i}\")\n if self.tile_internal:\n active_outputs.append(inputs[i].repeat(1, 
self.beam_size, 1))\n else:\n active_outputs.append(inputs[i])\n\n if possible_translation_tokens is not None:\n self.output_names.append(\"possible_translation_tokens\")\n active_outputs.append(possible_translation_tokens)\n\n # just keep states for active_hypos\n for i, state in enumerate(state_outputs):\n beam_axis = beam_axis_per_state[i]\n if beam_axis is None:\n next_state = state\n else:\n next_state = state.index_select(dim=beam_axis, index=active_prev_hypos)\n active_outputs.append(next_state)\n self.output_names.append(f\"state_output_{i}\")\n self.input_names.append(f\"state_input_{i}\")\n\n return tuple(active_outputs)\n\n\nclass BeamDecodeWithEOS(BeamDecode):\n \"\"\"\n Run beam decoding based on the beam search output from\n DecoderBatchedStepEnsemble2BeamWithEOS. The differences compared with BeamDecode is:\n 1.there's no need to check prev_hypos finished or not when trying to get all end\n states since we don't expand at eos token in DecoderBatchedStepEnsemble2BeamWithEOS.\n 2. 
add extra step for eos token at the end.\n \"\"\"\n\n # TODO: (lizguo) This class will be merged with upstream later.\n @torch.jit.script_method\n def _get_all_end_states(\n self,\n beam_tokens: Tensor,\n beam_scores: Tensor,\n beam_prev_indices: Tensor,\n num_steps: int,\n ) -> Tensor:\n min_score = float(\"inf\")\n min_index = -1\n end_states = torch.jit.annotate(List[Tensor], [])\n\n position = 1\n while bool(position <= num_steps + 1):\n for hyp_index in range(self.beam_size):\n if bool(beam_tokens[position][hyp_index] == self.eos_token_id) or bool(\n position == num_steps + 1\n ):\n hypo_score = float(beam_scores[position][hyp_index])\n if bool(self.length_penalty != 0):\n hypo_score = hypo_score / float(position) ** float(\n self.length_penalty\n )\n end_states, min_score, min_index = self._add_to_end_states(\n end_states,\n min_score,\n torch.tensor([hypo_score, float(position), float(hyp_index)]),\n min_index,\n )\n position = position + 1\n\n end_states = torch.stack(end_states)\n\n _, sorted_end_state_indices = end_states[:, 0].sort(dim=0, descending=True)\n end_states = end_states[sorted_end_state_indices, :]\n return end_states\n\n @torch.jit.script_method\n def _check_dimensions(\n self,\n beam_tokens: Tensor,\n beam_scores: Tensor,\n token_weights: Tensor,\n beam_prev_indices: Tensor,\n num_steps: int,\n ) -> None:\n\n assert (\n beam_tokens.size(1) == 2 * self.beam_size\n ), \"Dimension of beam_tokens : {} and beam size : {} are not consistent\".format(\n beam_tokens.size(), self.beam_size\n )\n assert beam_scores.size(1) == 2 * self.beam_size, (\n \"Dimension of beam_scores : {} and beam size : {} \"\n \"are not consistent\".format(beam_scores.size(), self.beam_size)\n )\n assert token_weights.size(1) == 2 * self.beam_size, (\n \"Dimension of token_weights : {} and beam size : {} \"\n \"are not consistent\".format(token_weights.size(), self.beam_size)\n )\n assert (\n beam_prev_indices.size(1) == 2 * self.beam_size\n ), \"Dimension of 
beam_prev_indices : {} and beam size : {} \"\n \"are not consistent\".format(beam_prev_indices.size(), self.beam_size)\n\n assert beam_tokens.size(0) <= num_steps + 2, (\n \"Dimension of beam_tokens : {} and num_steps : {} \"\n \"are not consistent\".format(beam_tokens.size(), num_steps)\n )\n assert beam_scores.size(0) <= num_steps + 2, (\n \"Dimension of beam_scores : {} and num_steps : {} \"\n \"are not consistent\".format(beam_scores.size(), num_steps)\n )\n assert token_weights.size(0) <= num_steps + 2, (\n \"Dimension of token_weights : {} and num_steps : {} \"\n \"are not consistent\".format(token_weights.size(), num_steps)\n )\n assert beam_prev_indices.size(0) <= num_steps + 2, (\n \"Dimension of beam_prev_indices : {} and num_steps : {} \"\n \"are not consistent\".format(beam_prev_indices.size(), num_steps)\n )\n\n\nclass BeamSearchAndDecodeV2(torch.jit.ScriptModule):\n \"\"\"\n The difference between BeamSearchAndDecodeV2 and BeamSearchAndDecode is: V2 calls\n DecoderBatchedStepEnsemble2BeamWithEOS instead of DecoderBatchedStepEnsemble when\n running beam search. 
Also, since extra EOS token has been added, it calls\n BeamDecodeWithEOS when running beam decoding which supports adding extra EOS token.\n \"\"\"\n\n def __init__(\n self,\n models,\n tgt_dict,\n src_tokens,\n src_lengths,\n eos_token_id,\n length_penalty,\n nbest,\n beam_size,\n stop_at_eos,\n word_reward=0,\n unk_reward=0,\n quantize=False,\n ):\n super().__init__()\n\n self.models = models\n self.tgt_dict = tgt_dict\n self.beam_size = torch.jit.Attribute(beam_size, int)\n self.word_reward = torch.jit.Attribute(word_reward, float)\n self.unk_reward = torch.jit.Attribute(unk_reward, float)\n\n encoder_ens = EncoderEnsemble(self.models)\n encoder_ens.enable_precompute_reduced_weights = True\n\n if quantize:\n encoder_ens = torch.jit.quantized.quantize_linear_modules(encoder_ens)\n encoder_ens = torch.jit.quantized.quantize_rnn_cell_modules(encoder_ens)\n\n # not support char source model\n self.is_char_source = False\n enc_inputs = (src_tokens, src_lengths)\n example_encoder_outs = encoder_ens(*enc_inputs)\n self.encoder_ens = torch.jit.trace(\n encoder_ens, enc_inputs, _force_outplace=True\n )\n self.encoder_ens_char_source = FakeCharSourceEncoderEnsemble()\n\n decoder_ens = DecoderBatchedStepEnsemble2BeamWithEOS(\n self.models,\n tgt_dict,\n beam_size,\n word_reward,\n unk_reward,\n tile_internal=False,\n )\n decoder_ens.enable_precompute_reduced_weights = True\n if quantize:\n decoder_ens = torch.jit.quantized.quantize_linear_modules(decoder_ens)\n decoder_ens = torch.jit.quantized.quantize_rnn_cell_modules(decoder_ens)\n decoder_ens = torch.jit.quantized.quantize_rnn_modules(decoder_ens)\n decoder_ens_tile = DecoderBatchedStepEnsemble2BeamWithEOS(\n self.models,\n tgt_dict,\n beam_size,\n word_reward,\n unk_reward,\n tile_internal=True,\n )\n decoder_ens_tile.enable_precompute_reduced_weights = True\n if quantize:\n decoder_ens_tile = torch.jit.quantized.quantize_linear_modules(\n decoder_ens_tile\n )\n decoder_ens_tile = 
torch.jit.quantized.quantize_rnn_cell_modules(\n decoder_ens_tile\n )\n decoder_ens_tile = torch.jit.quantized.quantize_rnn_modules(\n decoder_ens_tile\n )\n prev_token = torch.LongTensor([0])\n prev_scores = torch.FloatTensor([0.0])\n ts = torch.LongTensor([0])\n final_step = torch.tensor([False], dtype=torch.bool)\n active_hypos = torch.LongTensor([0])\n\n _, _, _, _, _, *tiled_states = decoder_ens_tile(\n prev_token, prev_scores, active_hypos, ts, final_step, *example_encoder_outs\n )\n\n self.decoder_ens_tile = torch.jit.trace(\n decoder_ens_tile,\n (\n prev_token,\n prev_scores,\n active_hypos,\n ts,\n final_step,\n *example_encoder_outs,\n ),\n _force_outplace=True,\n )\n self.decoder_ens = torch.jit.trace(\n decoder_ens,\n (\n prev_token.repeat(self.beam_size),\n prev_scores.repeat(self.beam_size),\n active_hypos.repeat(self.beam_size),\n ts,\n final_step,\n *tiled_states,\n ),\n _force_outplace=True,\n )\n\n self.beam_decode = BeamDecodeWithEOS(\n eos_token_id, length_penalty, nbest, beam_size, stop_at_eos\n )\n\n self.input_names = [\n \"src_tokens\",\n \"src_lengths\",\n \"prev_token\",\n \"prev_scores\",\n \"attn_weights\",\n \"prev_hypos_indices\",\n \"num_steps\",\n ]\n self.output_names = [\n \"beam_output\",\n \"hypothesis_score\",\n \"token_level_scores\",\n \"back_alignment_weights\",\n \"best_indices\",\n ]\n\n @torch.jit.script_method\n def forward(\n self,\n src_tokens: torch.Tensor,\n src_lengths: torch.Tensor,\n prev_token: torch.Tensor,\n prev_scores: torch.Tensor,\n attn_weights: torch.Tensor,\n prev_hypos_indices: torch.Tensor,\n active_hypos: torch.Tensor,\n num_steps: int,\n ) -> List[Tuple[Tensor, float, List[float], Tensor, Tensor]]:\n\n enc_states = self.encoder_ens(src_tokens, src_lengths)\n\n # enc_states ends up being optional because of the above branch, one\n # side returns None. 
We should never take the path that returns None\n # so we unrap the optional type here.\n enc_states = torch.jit._unwrap_optional(enc_states)\n\n # remove torch.cat, keep things in a list\n all_tokens = [prev_token.repeat(repeats=[2 * self.beam_size])]\n all_scores = [prev_scores.repeat(repeats=[2 * self.beam_size])]\n all_weights = [\n attn_weights.unsqueeze(dim=0).repeat(repeats=[2 * self.beam_size, 1])\n ]\n all_prev_indices = [prev_hypos_indices]\n\n (\n prev_token,\n prev_scores,\n prev_hypos_indices,\n attn_weights,\n active_hypos,\n *states,\n ) = self.decoder_ens_tile(\n prev_token,\n prev_scores,\n active_hypos,\n torch.tensor([0]),\n torch.tensor([False]),\n *enc_states, # noqa\n )\n all_tokens = all_tokens.append(prev_token)\n all_scores = all_scores.append(prev_scores)\n all_weights = all_weights.append(attn_weights)\n all_prev_indices = all_prev_indices.append(prev_hypos_indices)\n\n for i in range(num_steps - 1):\n (\n prev_token,\n prev_scores,\n prev_hypos_indices,\n attn_weights,\n active_hypos,\n *states,\n ) = self.decoder_ens(\n prev_token,\n prev_scores,\n active_hypos,\n torch.tensor([i + 1]),\n torch.tensor([False]),\n *states, # noqa\n )\n\n all_tokens = all_tokens.append(prev_token)\n all_scores = all_scores.append(prev_scores)\n all_weights = all_weights.append(attn_weights)\n all_prev_indices = all_prev_indices.append(prev_hypos_indices)\n\n # add eos token as extra step\n (\n prev_token,\n prev_scores,\n prev_hypos_indices,\n attn_weights,\n active_hypos,\n *states,\n ) = self.decoder_ens(\n prev_token,\n prev_scores,\n active_hypos,\n torch.tensor([num_steps]),\n torch.tensor([True]),\n *states,\n )\n\n all_tokens = all_tokens.append(prev_token)\n all_scores = all_scores.append(prev_scores)\n all_weights = all_weights.append(attn_weights)\n all_prev_indices = all_prev_indices.append(prev_hypos_indices)\n\n outputs = torch.jit.annotate(\n List[Tuple[Tensor, float, List[float], Tensor, Tensor]], []\n )\n outputs = self.beam_decode(\n 
torch.stack(all_tokens, dim=0),\n torch.stack(all_scores, dim=0),\n torch.stack(all_weights, dim=0),\n torch.stack(all_prev_indices, dim=0),\n num_steps,\n )\n\n return outputs\n\n @classmethod\n def build_from_checkpoints(\n cls,\n checkpoint_filenames,\n src_dict_filename,\n dst_dict_filename,\n beam_size,\n length_penalty,\n nbest,\n word_reward=0,\n unk_reward=0,\n lexical_dict_paths=None,\n ):\n length = 10\n models, _, tgt_dict = load_models_from_checkpoints(\n checkpoint_filenames,\n src_dict_filename,\n dst_dict_filename,\n lexical_dict_paths,\n )\n src_tokens = torch.LongTensor(np.ones((length, 1), dtype=\"int64\"))\n src_lengths = torch.IntTensor(np.array([length], dtype=\"int32\"))\n eos_token_id = tgt_dict.eos()\n\n return cls(\n models,\n tgt_dict,\n src_tokens,\n src_lengths,\n eos_token_id,\n length_penalty=length_penalty,\n nbest=nbest,\n beam_size=beam_size,\n stop_at_eos=True,\n word_reward=word_reward,\n unk_reward=unk_reward,\n quantize=True,\n )\n\n def save_to_pytorch(self, output_path):\n def pack(s):\n if hasattr(s, \"_pack\"):\n s._pack()\n\n def unpack(s):\n if hasattr(s, \"_unpack\"):\n s._unpack()\n\n self.apply(pack)\n torch.jit.save(self, output_path)\n self.apply(unpack)\n" ]
[ [ "torch.jit.save", "torch.LongTensor", "torch.jit.quantized.quantize_rnn_modules", "torch.jit.trace", "torch.cat", "torch.topk", "torch.jit.Attribute", "torch.jit.quantized.quantize_linear_modules", "torch.jit.quantized.quantize_rnn_cell_modules", "torch.jit.annotate", "torch.tensor", "numpy.ones", "torch.FloatTensor", "torch.arange", "torch.stack", "numpy.array", "torch.jit._unwrap_optional" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bomtorazek/carrier-of-tricks-for-classification-pytorch
[ "94ef8f4c38c5c872a615e9f2bca0060bb8c973b2" ]
[ "network/regnet.py" ]
[ "import torch.nn as nn\nimport numpy as np\nimport os\nfrom network.anynet import AnyNet\n\ndef quantize_float(f, q):\n \"\"\"Converts a float to closest non-zero int divisible by q.\"\"\"\n return int(round(f / q) * q)\n\n\ndef adjust_ws_gs_comp(ws, bms, gs):\n \"\"\"Adjusts the compatibility of widths and groups.\"\"\"\n ws_bot = [int(w * b) for w, b in zip(ws, bms)]\n gs = [min(g, w_bot) for g, w_bot in zip(gs, ws_bot)]\n ws_bot = [quantize_float(w_bot, g) for w_bot, g in zip(ws_bot, gs)]\n ws = [int(w_bot / b) for w_bot, b in zip(ws_bot, bms)]\n return ws, gs\n\n\ndef get_stages_from_blocks(ws, rs):\n \"\"\"Gets ws/ds of network at each stage from per block values.\"\"\"\n ts_temp = zip(ws + [0], [0] + ws, rs + [0], [0] + rs)\n ts = [w != wp or r != rp for w, wp, r, rp in ts_temp]\n s_ws = [w for w, t in zip(ws, ts[:-1]) if t]\n s_ds = np.diff([d for d, t in zip(range(len(ts)), ts) if t]).tolist()\n return s_ws, s_ds\n\n\ndef generate_regnet(w_a, w_0, w_m, d, q=8):\n \"\"\"Generates per block ws from RegNet parameters.\"\"\"\n assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0\n ws_cont = np.arange(d) * w_a + w_0\n ks = np.round(np.log(ws_cont / w_0) / np.log(w_m))\n ws = w_0 * np.power(w_m, ks)\n ws = np.round(np.divide(ws, q)) * q\n num_stages, max_stage = len(np.unique(ws)), ks.max() + 1\n ws, ws_cont = ws.astype(int).tolist(), ws_cont.tolist()\n return ws, num_stages, max_stage, ws_cont\n\nclass RegNet(AnyNet):\n \"\"\"RegNetY-1.6GF model.\"\"\"\n def __init__(self, shape, num_classes=2, checkpoint_dir='checkpoint', checkpoint_name='RegNet',):\n self.shape = shape\n self.num_classes = num_classes\n self.checkpoint_dir = checkpoint_dir\n self.checkpoint_name = checkpoint_name\n if len(shape) != 3:\n raise ValueError('Invalid shape: {}'.format(shape))\n self.H, self.W, self.C = shape\n \n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n self.checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name, 'model.pt')\n\n SE_ON = 
True\n DEPTH = 21\n W0 = 80\n WA = 42.63\n WM = 2.66\n GROUP_W = 24\n\n # Generate RegNet ws per block\n b_ws, num_s, _, _ = generate_regnet(\n w_a=WA, w_0=W0, w_m=WM, d=DEPTH\n )\n # Convert to per stage format\n ws, ds = get_stages_from_blocks(b_ws, b_ws)\n # Generate group widths and bot muls\n gws = [GROUP_W for _ in range(num_s)]\n bms = [1.0 for _ in range(num_s)]\n # Adjust the compatibility of ws and gws\n ws, gws = adjust_ws_gs_comp(ws, bms, gws)\n # Use the same stride for each stage\n ss = [2 for _ in range(num_s)]\n # Use SE for RegNetY\n se_r = 0.25 if SE_ON else None\n # Construct the model\n kwargs = {\n \"stem_type\": \"simple_stem_in\",\n \"stem_w\": 32,\n \"block_type\": \"res_bottleneck_block\",\n \"ss\": ss,\n \"ds\": ds,\n \"ws\": ws,\n \"bms\": bms,\n \"gws\": gws,\n \"se_r\": se_r,\n \"nc\": self.num_classes\n }\n super(RegNet, self).__init__(shape, num_classes, checkpoint_dir, checkpoint_name, **kwargs)" ]
[ [ "numpy.log", "numpy.unique", "numpy.power", "numpy.arange", "numpy.divide" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mcengija/riggingtools
[ "a9e725d3e4419da87f787ff6a0c67bfd28c8f1bb" ]
[ "rig2py/py/plot.py" ]
[ "\"\"\"\nThis file uses matplotlib to render a single frame from a CSV file.\nThe CSV file is just data where every line is a frame. Rig keypoints are assumed to be in the following order:\n\n| Index | Joint |\n| -- | -- |\n| 0 | pelvis |\n| 1 | rHIp |\n| 2 | rKnee |\n| 3 | rAnkle\n| 4 | rToeBase |\n| 5 | lHip |\n| 6 | lKnee |\n| 7 | lAnkle |\n| 8 | lToeBase |\n| 9 | spine2 |\n| 10 | spine3 |\n| 11 | spine4 |\n| 12 | rShoulder |\n| 13 | rElbow |\n| 14 | rWrist |\n| 15 | lShoulder |\n| 16 | lElbow |\n| 17 | lWrist |\n| 18 | baseNeck |\n| 19 | baseHead |\n\nExample file:\n\n x1,y1,z1,x2,y2,z2,...xLastKP,yLastKP,zLastKP\n x1,y1,z1,x2,y2,z2,...xLastKP,yLastKP,zLastKP\n ...\n\"\"\"\n\nimport sys\nimport os\nimport numpy # pip3 install numpy\nimport math\nimport matplotlib.pyplot as plt # pip3 install matplotlib\nfrom mpl_toolkits import mplot3d # required for matplot versions older than 3.1.1\n\ndef addPlot( poseData, ax ):\n\n # Convert the CSV string to a float array\n xyzData = [float(numeric_string) for numeric_string in poseData.split( \",\" )]\n\n # Separate the components\n xPoints = xyzData[ 0::3 ]\n yPoints = xyzData[ 1::3 ]\n zPoints = xyzData[ 2::3 ]\n\n # Normalize positions to the first joint\n xPoints = [ x - xPoints[0] for x in xPoints ]\n yPoints = [ y - yPoints[0] for y in yPoints ]\n zPoints = [ z - zPoints[0] for z in zPoints ]\n\n # Plot individual sections so the lines connect as expected\n plot( range(0,5), ax, xPoints, yPoints, zPoints )\n plot( [0,5,6,7,8], ax, xPoints, yPoints, zPoints )\n plot( [0,9,10,11,18,19], ax, xPoints, yPoints, zPoints )\n plot( [18,12,13,14], ax, xPoints, yPoints, zPoints )\n plot( [18,15,16,17], ax, xPoints, yPoints, zPoints )\n\ndef plot( indices, ax, xPoints, yPoints, zPoints ):\n\n xPointsToPlot = []\n yPointsToPlot = []\n zPointsToPlot = []\n for i in indices:\n #ax.text( xPoints[i], yPoints[i], zPoints[i], str(i) )\n xPointsToPlot.append( xPoints[i] )\n yPointsToPlot.append( yPoints[i] )\n zPointsToPlot.append( 
zPoints[i] )\n\n ax.plot( xPointsToPlot, \n yPointsToPlot,\n zPointsToPlot,\n '-o' )\n\ndef main( filenames ):\n \n print( \"Plotting the first frame for all characters\" )\n\n # Create and layout our subplots\n numColumns=1\n while math.pow(numColumns,2) < len(filenames):\n numColumns = numColumns + 1\n\n numRows = numColumns\n if numColumns * (numRows - 1) >= len(filenames):\n numRows = numRows - 1\n\n fig = plt.figure()\n plt.axis('off')\n\n i = 1\n for filename in filenames:\n\n # Read the first line\n file = open( filename , 'r' )\n line = file.readline().split( '\\n' )[0]\n file.close()\n\n # Create a new plot\n ax = fig.add_subplot( numRows, numColumns, i, projection=\"3d\" )\n\n # Set limits\n limit=0.5\n ax.set_xlim([-limit,limit])\n ax.set_ylim([-limit,limit])\n ax.set_zlim([-limit,limit])\n ax.axis('off')\n ax.set_title( os.path.splitext(filename)[0] )\n\n # Add the plot\n addPlot( line, ax )\n \n i+=1\n \n # Show the plot\n plt.show()\n \nif __name__ == \"__main__\":\n\n # Assume all arguments are CSV files\n argv = sys.argv[1:]\n if len( argv ) < 1:\n print( \"Usage: <CSV files>\" )\n exit( 0 )\n\n main( argv )" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pedrodaniel10/CRC
[ "faf16f163fc1ac682ed993a63a71fd0a46d6463e" ]
[ "project2/src/mention_network.py" ]
[ "from igraph import *\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom statistics import mean\n\nBINS = 1\n\n\n# Prints degree distribution properties\ndef print_degree_dist_props(histogram):\n print(\"Results:\")\n print(\"Mean:\", round(histogram.mean, 2))\n print(\"Variance:\", round(histogram.var, 2))\n\n\n# Calculates best fit power-law lambda\ndef calc_best_fit(xs, ys):\n data = []\n for i in range(len(xs)):\n for j in range(int(xs[i])):\n data.append(ys[i])\n result = power_law_fit(data)\n return round(result.alpha, 1)\n\n\n# Plots best fit power laws for probability distribution\ndef plot_best_fit(x, alpha, lambda_val):\n plt.plot(x, alpha*x**-lambda_val, \"-\")\n\n\n# Plots degree distribution from igraph.Histogram\ndef plot_degree_dist(histogram):\n # Get frequencies\n xs, ys = zip(*[(left, count) for left, _, count in histogram.bins()])\n xs = np.array(xs)\n ys = np.array(ys)\n\n del_indexes = np.array(xs[(ys < 1)], dtype=\"int\")\n xs = np.delete(xs, del_indexes)\n ys = np.delete(ys, del_indexes)\n\n # Normalize\n ys_norm = [float(i)/sum(ys) for i in ys]\n\n # Plot out-degree distribution\n plt.plot(xs, ys_norm, marker=\".\", linestyle=\"\", markersize=2)\n\n return xs, ys\n\n\n# Calculates out-degree distribution\ndef calc_out_degree(graph):\n print(\"Calculating out-degree distribution...\")\n plt.figure()\n\n histogram = graph.degree_distribution(BINS, mode=OUT)\n xs, ys = plot_degree_dist(histogram)\n\n lambda1 = calc_best_fit(xs, ys)\n\n alpha1 = 20\n\n plot_best_fit(xs[10:65], alpha1, lambda1)\n\n plt.annotate(xy=[15, 0.0001], s=\"λ=\" + str(lambda1))\n\n # Scales and labels\n plt.yscale(\"log\")\n plt.xscale(\"log\")\n plt.xlabel(\"Out-degree\")\n plt.ylabel(\"Probability\")\n plt.show()\n\n print_degree_dist_props(histogram)\n\n\n# Calculates in-degree distribution\ndef calc_in_degree(graph):\n print(\"Calculating in-degree distribution...\")\n plt.figure()\n\n histogram = graph.degree_distribution(BINS, mode=IN)\n 
xs, ys = plot_degree_dist(histogram)\n\n lambda1 = calc_best_fit(xs, ys)\n\n alpha = 50\n\n plot_best_fit(xs[10:75], alpha, lambda1)\n\n plt.annotate(xy=[25, 0.00018], s=\"λ=\" + str(lambda1))\n\n # Scales and labels\n plt.yscale(\"log\")\n plt.xscale(\"log\")\n plt.xlabel(\"In-degree\")\n plt.ylabel(\"Probability\")\n plt.show()\n\n print_degree_dist_props(histogram)\n\n\n# Calculates total degree distribution\ndef calc_total_degree(graph):\n print(\"Calculating total degree distribution...\")\n plt.figure()\n\n histogram = graph.degree_distribution(BINS, mode=ALL)\n xs, ys = plot_degree_dist(histogram)\n\n lambda1 = calc_best_fit(xs, ys)\n\n alpha1 = 50\n\n plot_best_fit(xs[10:75], alpha1, lambda1)\n\n plt.annotate(xy=[25, 0.0005], s=\"λ=\" + str(lambda1))\n\n # Scales and labels\n plt.yscale(\"log\")\n plt.xscale(\"log\")\n plt.xlabel(\"Total degree\")\n plt.ylabel(\"Probability\")\n plt.show()\n\n print_degree_dist_props(histogram)\n\n\n# Calculates graph assortativity degree\ndef calc_assort_degree(graph):\n print(\"Calculating assortativity degree...\")\n result = graph.assortativity_degree()\n print(\"Result:\", result)\n\n\ndef calc_clustering(graph, n_nodes, n_edges):\n print(\"Calculating the global clustering coefficient...\")\n print(\"Result:\", graph.transitivity_undirected())\n\n print(\"Calculating the average clustering coefficient (average of local coefficients)...\")\n print(\"Result:\", np.mean(graph.transitivity_local_undirected(mode=\"zero\")))\n\n print(\"Calculating SCC...\")\n scc = graph.components(mode=\"STRONG\")\n largest_scc = scc.giant()\n\n largest_scc_nodes = largest_scc.vcount()\n largest_scc_edges = largest_scc.ecount()\n print(\"Nodes in largest SCC\", largest_scc_nodes,\n \"(\" + str(round(largest_scc_nodes/n_nodes, 3)) + \")\")\n print(\"Edges in largest SCC\", largest_scc_edges,\n \"(\" + str(round(largest_scc_edges/n_edges, 3)) + \")\")\n\n print(\"Calculating WCC...\")\n wcc = graph.components(mode=\"WEAK\")\n largest_wcc = 
wcc.giant()\n\n largest_wcc_nodes = largest_wcc.vcount()\n largest_wcc_edges = largest_wcc.ecount()\n print(\"Nodes in largest WCC\", largest_wcc_nodes,\n \"(\" + str(round(largest_wcc_nodes/n_nodes, 3)) + \")\")\n print(\"Edges in largest WCC\", largest_wcc_edges,\n \"(\" + str(round(largest_wcc_edges/n_edges, 3)) + \")\")\n\n\ndef calc_short_path(graph):\n print(\"Calculating diameter...\")\n print(\"Result:\", graph.diameter())\n\n print(\"Calculating average path length...\")\n print(\"Result:\", graph.average_path_length())\n" ]
[ [ "matplotlib.pyplot.yscale", "matplotlib.pyplot.plot", "numpy.delete", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xscale", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kishorkuttan/pytorch-lightning
[ "f8996385fb48f18c5655937ca199a0b5a9d7e320" ]
[ "pytorch_lightning/accelerators/base_backend.py" ]
[ "import math\nfrom enum import Enum\nfrom typing import Any\n\nimport torch\n\nfrom pytorch_lightning.utilities import AMPType, rank_zero_warn\nfrom pytorch_lightning.utilities.apply_func import move_data_to_device\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.parsing import AttributeDict\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\nEPSILON = 1e-6\nEPSILON_FP16 = 1e-5\n\n\nclass Accelerator(object):\n\n def __init__(self, trainer):\n self.trainer = trainer\n self.dist = AttributeDict(rank=0, device=None)\n\n def setup(self, model):\n pass\n\n def teardown(self):\n pass\n\n def barrier(self, name: str = None):\n pass\n\n def broadcast(self, obj, src=0):\n return obj\n\n def train_or_test(self):\n if self.trainer.testing:\n results = self.trainer.run_test()\n else:\n results = self.trainer.train()\n return results\n\n def batch_to_device(self, batch: Any, device: torch.device):\n model = self.trainer.get_model()\n if model is not None:\n return model.transfer_batch_to_device(batch, device)\n return move_data_to_device(batch, device)\n\n def training_step_end(self, output):\n return output\n\n def test_step_end(self, output):\n return output\n\n def validation_step_end(self, output):\n return output\n\n def process_dataloader(self, dataloader):\n return dataloader\n\n def backward(self, closure_loss, optimizer, opt_idx):\n model_ref = self.trainer.get_model()\n\n # scale loss for 16 bit\n if self.trainer.precision == 16:\n closure_loss = model_ref.amp_scale_loss(\n closure_loss,\n optimizer,\n opt_idx,\n amp_backend=self.trainer.amp_backend\n )\n\n # enter amp context\n if self.trainer.amp_backend == AMPType.APEX:\n self.trainer.dev_debugger.track_event('AMP', str(AMPType.APEX))\n context = closure_loss\n closure_loss = closure_loss.__enter__()\n\n # do backward pass\n model_ref.backward(self, closure_loss, optimizer, opt_idx)\n\n # exit amp context\n if self.trainer.precision 
== 16 and self.trainer.amp_backend == AMPType.APEX:\n a, b, c = None, None, None\n error = context.__exit__(a, b, c)\n if error:\n rank_zero_warn(a, b, c)\n raise Exception('apex unscale error')\n\n # once backward has been applied, release graph\n closure_loss = closure_loss.detach()\n return closure_loss\n\n def optimizer_step(self, optimizer, batch_idx, opt_idx, lambda_closure):\n model_ref = self.trainer.get_model()\n is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)\n native_amp = self.trainer.amp_backend == AMPType.NATIVE\n\n # native amp + lbfgs is a no go right now\n if native_amp and is_lbfgs:\n raise MisconfigurationException(\n 'native PyTorch amp and lbfgs are not compatible.'\n ' To request, please file a Github issue in PyTorch and tag @mcarilli')\n\n # model hook\n model_ref.optimizer_step(\n self.trainer.current_epoch,\n batch_idx,\n optimizer,\n opt_idx,\n lambda_closure,\n using_native_amp=native_amp,\n using_lbfgs=is_lbfgs\n )\n\n # scale when native amp\n if native_amp:\n self.trainer.scaler.update()\n\n def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):\n model_ref = self.trainer.get_model()\n model_ref.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)\n\n def clip_gradients(self, optimizer):\n\n if self.trainer.amp_backend == AMPType.NATIVE:\n self.trainer.scaler.unscale_(optimizer)\n\n # apply clip gradients\n # TODO: separate TPU case from here\n self._clip_gradients(optimizer)\n\n def _clip_gradients(self, optimizer):\n # this code is a modification of torch.nn.utils.clip_grad_norm_\n # with TPU support based on https://github.com/pytorch/xla/blob/master/TROUBLESHOOTING.md\n if self.trainer.gradient_clip_val <= 0:\n return\n\n model = self.trainer.get_model()\n if self.trainer.amp_backend == AMPType.APEX:\n parameters = amp.master_params(optimizer)\n else:\n parameters = model.parameters()\n\n max_norm = float(self.trainer.gradient_clip_val)\n norm_type = float(2.0)\n\n if isinstance(parameters, 
torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n\n if norm_type == math.inf:\n total_norm = max(p.grad.data.abs().max() for p in parameters)\n else:\n device = parameters[0].device\n out = torch.empty(len(parameters), device=device)\n for i, p in enumerate(parameters):\n torch.norm(p.grad.data.to(device), norm_type, out=out[i])\n total_norm = torch.norm(out, norm_type)\n\n eps = EPSILON_FP16 if self.trainer.precision == 16 else EPSILON\n clip_coef = torch.tensor(max_norm, device=device) / (total_norm + eps)\n clip_coef = torch.min(clip_coef, torch.ones_like(clip_coef))\n for p in parameters:\n p.grad.data.mul_(clip_coef.to(p.grad.data.device))\n\n def on_train_epoch_end(self):\n pass\n\n def on_train_end(self):\n pass\n\n def early_stopping_should_stop(self, pl_module):\n return self.trainer.should_stop\n\n def setup_optimizers(self, model):\n if self.trainer.testing is True:\n return\n\n optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)\n self.trainer.optimizers = optimizers\n self.trainer.lr_schedulers = lr_schedulers\n self.trainer.optimizer_frequencies = optimizer_frequencies\n\n\nclass BackendType(Enum):\n DP = 'dp'\n DDP = 'ddp'\n DDP2 = 'ddp2'\n DDP_SPAWN = 'ddp_spawn'\n DDP_CPU = 'ddp_cpu'\n TPU = 'tpu'\n HOROVOD = 'horovod'\n" ]
[ [ "torch.norm", "torch.ones_like", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
prtkbansal/search_with_machine_learning_course
[ "654ec9563c651de8be83658cd13e2beacaee91ec" ]
[ "week2/utilities/data_prepper.py" ]
[ "# This file processes our queries, runs them through OpenSearch against the BBuy Products index to fetch their \"rank\" and so they can be used properly in a click model\n\nimport ltr_utils as lu\nimport numpy as np\nimport pandas as pd\nimport query_utils as qu\nfrom opensearchpy import RequestError\nimport os\n\n# from importlib import reload\n\nclass DataPrepper:\n opensearch = None\n index_name = \"bbuy_products\"\n ltr_store_name = \"week2\"\n\n\n def __init__(self, opensearch_client, featureset_name=\"bbuy_product_featureset\", index_name=\"bbuy_products\",\n ltr_store_name=\"week2\") -> None:\n self.opensearch = opensearch_client\n self.featureset_name = featureset_name\n self.index_name = index_name\n self.ltr_store_name = ltr_store_name\n\n def __get_query_id(self, query, query_ids_map, query_counter):\n qid = query_ids_map.get(query, None)\n if not qid:\n query_counter += 1\n qid = query_counter\n query_ids_map[query] = qid\n return qid, query_counter\n\n def filter_junk_clicks(self, clicks_df, verify_file, output_dir):\n # remove sale/promotional queries like: `LaborDay_HomeAppliances_20110902`\n print(\"Clicks pre filtering: %s\" % len(clicks_df))\n clicks_df = clicks_df[clicks_df[\"query\"].str.match(\"\\w+_(\\w+_)?[\\w+|\\d+]\") == False]\n #print(\"Clicks post filtering promos: %s\" % len(clicks_df))\n verify_file_path = \"%s/%s\" % (output_dir, verify_file)\n print(\"Verify info: flag: %s, path: %s, exists: %s\" % (verify_file, verify_file_path, os.path.exists(verify_file_path)))\n if verify_file and os.path.exists(verify_file_path):\n verify_file = pd.read_csv(verify_file_path)\n good = verify_file[verify_file[\"status\"] == 1]\n clicks_df = pd.merge(clicks_df, good, on=\"sku\", how=\"right\")\n print(\"Clicks post filtering: %s\" % len(clicks_df))\n return clicks_df\n\n\n def create_splits(self, file_to_split, split_train, split_test, output_dir, train_rows, test_rows, verify_file):\n print(\"Splitting: %s and writing train to: %s and test to: 
%s in %s\" % (\n file_to_split, split_train, split_test, output_dir))\n input_df = pd.read_csv(file_to_split, parse_dates=['click_time', 'query_time'])\n #input_df = input_df.astype({'click_time': 'datetime64', 'query_time':'datetime64'})\n input_df = self.filter_junk_clicks(input_df, verify_file, output_dir)\n # first we are going to split by date\n half = input_df['click_time'].median()\n first = input_df[input_df['click_time'] <= half]\n second = input_df[input_df['click_time'] > half]\n # for testing, we should still allow for splitting into less rows\n if train_rows > 0:\n # if we are using less than the full set, then shuffle them\n first = first.sample(frac=1).reset_index(drop=True) # shuffle things\n first = first[:min(len(first), train_rows)]\n if test_rows > 0:\n # if we are using less than the full set, then shuffle them\n second = second.sample(frac=1).reset_index(drop=True) # shuffle things\n second = second[:min(len(second), test_rows)]\n #train, test = model_selection.train_test_split(input_df, test_size=args.split_test_size)\n #input_df = input_df.sample(frac=1).reset_index(drop=True) # shuffle things\n first.to_csv(\"%s/%s\" % (output_dir, split_train), index=False)\n second.to_csv(\"%s/%s\" % (output_dir, split_test), index=False)\n\n # Use the set of clicks and assume the clicks are in proportion to the actual rankings due to position bias\n #\n ## CAVEAT EMPTOR: WE ARE BUILDING A SYNTHETIC IMPRESSIONS DATA SET BECAUSE WE DON'T HAVE A PROPER ONE.\n #\n #\n def synthesize_impressions(self, clicks_df, min_impressions=20, min_clicks=5):\n pairs = clicks_df.groupby(['query', 'sku']).size().reset_index(name='clicks')\n pairs['rank'] = pairs.groupby('query')['clicks'].rank('dense', ascending=False)\n pairs['num_impressions'] = pairs.groupby('query')['clicks'].transform('sum')\n # cut off the extreme end of the long tail due to low confidence in the evidence\n pairs = pairs[(pairs['num_impressions'] >= min_impressions) & (pairs['clicks'] >= 
min_clicks)]\n\n pairs['doc_id'] = pairs['sku'] # not technically the doc id, but since we aren't doing a search...\n pairs['product_name'] = \"fake\"\n query_ids = []\n query_ids_map = {}\n query_counter = 1\n for item in pairs.itertuples():\n query_id, query_counter = self.__get_query_id(item.query, query_ids_map, query_counter)\n query_ids.append(query_id)\n\n pairs[\"query_id\"] = query_ids\n return (pairs, query_ids_map)\n\n #########\n ##\n ## CAVEAT EMPTOR: WE ARE BUILDING A SYNTHETIC IMPRESSIONS DATA SET BECAUSE WE DON'T HAVE A PROPER ONE.\n ## YOU WOULD NOT DO THIS IN THE REAL WORLD, BUT YOU WOULD DO SOMETHING SIMILAR BY LOGGING YOUR IMPRESSIONS, BOTH POSITIVE AND NEGATIVE\n ##\n #########\n # For each query, send it to OpenSearch and log all the documents we retrieved and their position\n # Return the impressions data as a Pandas Data Frame\n def generate_impressions(self, query_df, query_ids_map, retrieval_size=50, min_impressions=20, min_clicks=5):\n # Capture judgments info into a dict of arrays so we can convert it to a Data Frame\n query_counter = 1\n query_ids_list = []\n\n query_strs = []\n doc_ids = []\n ranks = []\n clicks = []\n num_impressions = []\n product_names = []\n skus = []\n query_gb = query_df.groupby(\"query\") # small\n no_results = set()\n for key in query_gb.groups.keys():\n query_id, query_counter = self.__get_query_id(key, query_ids_map, query_counter)\n #print(\"Q[%s]: %s\" % (query_id, key))\n query_times_seen = 0 # careful here\n prior_clicks_for_query = query_gb.get_group(key)\n prior_doc_ids = None\n prior_doc_id_weights = None\n if prior_clicks_for_query is not None and len(prior_clicks_for_query) > 0:\n prior_doc_ids = prior_clicks_for_query.sku.drop_duplicates()\n prior_doc_id_weights = prior_clicks_for_query.sku.value_counts() # histogram gives us the click counts for all the doc_ids\n query_times_seen = prior_clicks_for_query.sku.count()\n click_prior_query = qu.create_prior_queries(prior_doc_ids, prior_doc_id_weights, 
query_times_seen)\n query_obj = qu.create_query(key, click_prior_query, filters=None, size=retrieval_size, include_aggs=False, highlight=False,\n source=[\"name\", \"sku\"]) # TODO: handle categories\n # Fetch way more than usual so we are likely to see our documents that have been clicked\n try:\n response = self.opensearch.search(body=query_obj, index=self.index_name)\n except RequestError as re:\n print(re, query_obj)\n else:\n if response and response['hits']['hits'] and len(response['hits']['hits']) > 0:\n # we have a response with some hits\n hits = response['hits']['hits']\n # print(hits)\n skus_for_query = prior_clicks_for_query.sku.drop_duplicates() # we are comparing skus later, so grab the Series now\n\n total_clicked_docs_per_query = 0\n for (idx, hit) in enumerate(hits):\n query_ids_list.append(query_id)\n query_strs.append(key)\n doc_ids.append(hit['_id'])\n ranks.append(idx)\n sku = int(hit['_source']['sku'][0])\n skus.append(sku)\n num_clicks = self.__num_clicks(skus_for_query, sku)\n if num_clicks > 0:\n total_clicked_docs_per_query += 1\n num_impressions.append(query_times_seen)\n clicks.append(num_clicks)\n if hit['_source'].get('name') is not None:\n product_names.append(hit['_source']['name'][0])\n else:\n product_names.append(\"SKU: %s -- No Name\" % sku)\n # print(\"Name: {}\\n\\nDesc: {}\\n\".format(hit['_source']['name'], hit['_source']['shortDescription']))\n\n #print(\"\\tQ[%s]: %s clicked\" % (query_id, total_clicked_docs_per_query))\n else:\n if response and (response['hits']['hits'] == None or len(response['hits']['hits']) == 0):\n print(\"No results for query: %s\" % key)\n no_results.add(key)\n else:\n print(response)\n print(\"Invalid response for query %s\" % query_obj)\n print(\"Zero results queries: %s\" % no_results)\n impressions_df = pd.DataFrame({\n \"query_id\": query_ids_list,\n \"query\": query_strs,\n \"doc_id\": doc_ids,\n \"rank\": ranks,\n \"clicks\": clicks,\n \"sku\": skus,\n \"num_impressions\": num_impressions,\n 
\"product_name\": product_names\n })\n # remove low click/impressions,\n #remove low click/impressions\n impressions_df = impressions_df[(impressions_df['num_impressions'] >= min_impressions) & (impressions_df['clicks'] >= min_clicks)]\n\n return impressions_df, query_ids_map\n\n def log_features(self, train_data_df, terms_field=\"_id\"):\n feature_frames = []\n query_gb = train_data_df.groupby(\"query\")\n no_results = {}\n ctr = 0\n #print(\"Number queries: %s\" % query_gb.count())\n for key in query_gb.groups.keys():\n if ctr % 500 == 0:\n print(\"Progress[%s]: %s\" % (ctr, key))\n ctr += 1\n # get all the docs ids for this query\n group = query_gb.get_group(key)\n doc_ids = group.doc_id.values\n\n if isinstance(doc_ids, np.ndarray):\n doc_ids = doc_ids.tolist()\n click_prior_query = qu.create_prior_queries_from_group(group)\n ltr_feats_df = self.__log_ltr_query_features(group[:1][\"query_id\"], key, doc_ids, click_prior_query, no_results,\n terms_field=terms_field)\n if ltr_feats_df is not None:\n feature_frames.append(ltr_feats_df)\n\n features_df = None\n if len(feature_frames) > 0:\n features_df = pd.concat(feature_frames)\n print(\"The following queries produced no results: %s\" % no_results)\n return features_df\n\n # Features look like:\n # {'log_entry': [{'name': 'title_match',\n # 'value': 7.221403},\n # {'name': 'shortDescription_match'},\n # {'name': 'longDescription_match'},\n # {'name': 'onsale_function', 'value': 0.0},\n # {'name': 'short_term_rank_function', 'value': 1922.0},\n # {'name': 'medium_term_rank_function', 'value': 7831.0},\n # {'name': 'long_term_rank_function', 'value': 4431.0},\n # {'name': 'sale_price_function', 'value': 949.99},\n # {'name': 'price_function', 'value': 0.0}]}]\n # For each query, make a request to OpenSearch with SLTR logging on and extract the features\n def __log_ltr_query_features(self, query_id, key, query_doc_ids, click_prior_query, no_results, terms_field=\"_id\"):\n\n log_query = 
lu.create_feature_log_query(key, query_doc_ids, click_prior_query, self.featureset_name,\n self.ltr_store_name,\n size=len(query_doc_ids), terms_field=terms_field)\n # IMPLEMENT_START --\n print(\"IMPLEMENT ME: __log_ltr_query_features: Extract log features out of the LTR:EXT response and place in a data frame\")\n # Loop over the hits structure returned by running `log_query` and then extract out the features from the response per query_id and doc id. Also capture and return all query/doc pairs that didn't return features\n # Your structure should look like the data frame below\n feature_results = {}\n feature_results[\"doc_id\"] = [] # capture the doc id so we can join later\n feature_results[\"query_id\"] = [] # ^^^\n feature_results[\"sku\"] = []\n feature_results[\"salePrice\"] = []\n feature_results[\"name_match\"] = []\n rng = np.random.default_rng(12345)\n for doc_id in query_doc_ids:\n feature_results[\"doc_id\"].append(doc_id) # capture the doc id so we can join later\n feature_results[\"query_id\"].append(query_id)\n feature_results[\"sku\"].append(doc_id) # ^^^\n feature_results[\"salePrice\"].append(rng.random())\n feature_results[\"name_match\"].append(rng.random())\n frame = pd.DataFrame(feature_results)\n return frame.astype({'doc_id': 'int64', 'query_id': 'int64', 'sku': 'int64'})\n # IMPLEMENT_END\n\n # Can try out normalizing data, but for XGb, you really don't have to since it is just finding splits\n def normalize_data(self, ranks_features_df, feature_set, normalize_type_map):\n # we need to get some stats from OpenSearch and then use that to normalize our data\n agg_fields = []\n aggs = {}\n for feature in feature_set['featureset']['features']:\n func_temp = feature['template'].get(\"function_score\")\n if func_temp is not None:\n # get the field\n funcs = func_temp.get(\"functions\",\n [func_temp.get(\"field_value_factor\")]) # could also be a field_value_factor alone\n for func in funcs:\n 
agg_fields.append(func['field_value_factor']['field'])\n stats_query = qu.create_stats_query(agg_fields)\n try:\n response = self.opensearch.search(stats_query, self.index_name)\n except RequestError as re:\n print(\"Unable to get aggs: %s\\t%s\" % (stats_query, re))\n raise re\n else:\n # we now have an OpenSearch response with a bunch of stats info. We mainly care about min/max/avg/std.dev\n if response and response['aggregations'] and len(response['aggregations']) > 0:\n aggs = response['aggregations']\n\n # Initialize with the identify function for every\n for agg in agg_fields:\n stats = aggs[agg]\n # print(\"agg: %s: %s\" %(agg, stats))\n norm_type = normalize_type_map.get(agg, \"default\")\n # We only support these two since they are the two main normalizers in the LTR plugin\n if norm_type == \"min-max\":\n min = stats[\"min\"]\n max = stats[\"max\"]\n max_min = max - min\n ranks_features_df[\"%s_norm\" % agg] = ranks_features_df[agg].apply(lambda x: (x - min) / max_min)\n elif norm_type == \"std-dev\":\n avg = stats[\"avg\"]\n std_dev = stats[\"std_deviation\"]\n ranks_features_df[\"%s_norm\" % agg] = ranks_features_df[agg].apply(lambda x: (x - avg) / std_dev)\n # else:\n # Do nothing for now\n else:\n print(\"No aggregations found in %s\" % response)\n return (ranks_features_df,\n aggs) # return out the aggregations, bc we are going to need it to write the model normalizers\n\n # Determine the number of clicks for this sku given a query (represented by the click group)\n def __num_clicks(self, all_skus_for_query, test_sku):\n print(\"IMPLEMENT ME: __num_clicks(): Return how many clicks the given sku received in the set of skus passed \")\n return 0\n" ]
[ [ "pandas.merge", "pandas.read_csv", "pandas.concat", "pandas.DataFrame", "numpy.random.default_rng" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
nat-chan/matplotlib-sixel
[ "5364fcae0a5851d292c7f68911ee3bd164141433" ]
[ "matplotlib-sixel/sixel.py" ]
[ "\n\n\"\"\"\nA matplotlib backend for displaying figures via sixel terminal graphics\n\nBased on the ipykernel source code \"backend_inline.py\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n\"\"\"\n\n\nimport matplotlib\n\nfrom matplotlib._pylab_helpers import Gcf\nfrom subprocess import Popen, PIPE\n\nfrom .xterm import xterm_pixels\n\nfrom matplotlib.backends.backend_agg import new_figure_manager, FigureCanvasAgg\nnew_figure_manager # for check\n\n\ndef resize_fig(figure):\n \"\"\" resize figure size, so that it fits into the terminal\n\n Checks the width and height\n Only makes the figure smaller\n\n \"\"\"\n dpi = figure.get_dpi()\n size = figure.get_size_inches() # w, h\n pixel_size = size * dpi\n\n pixel_factor = pixel_size / xterm_pixels()\n\n factor = max(max(pixel_factor), 1)\n\n size /= factor\n\n figure.set_size_inches(size)\n print(size)\n\n\ndef display(figure):\n \"\"\" Display figure on stdout as sixel graphic \"\"\"\n\n resize_fig(figure)\n\n p = Popen([\"img2sixel\"], stdin=PIPE)\n figure.savefig(p.stdin, format='png')\n p.stdin.close()\n p.wait()\n\n\ndef show(close=False, block=None):\n \"\"\"Show all figures as SVG/PNG payloads sent to the IPython clients.\n\n Parameters\n ----------\n close : bool, optional\n If true, a ``plt.close('all')`` call is automatically issued after\n sending all the figures. 
If this is set, the figures will entirely\n removed from the internal list of figures.\n block : Not used.\n The `block` parameter is a Matplotlib experimental parameter.\n We accept it in the function signature for compatibility with other\n backends.\n \"\"\"\n try:\n for figure_manager in Gcf.get_all_fig_managers():\n display(figure_manager.canvas.figure)\n finally:\n show._to_draw = []\n # only call close('all') if any to close\n # close triggers gc.collect, which can be slow\n if close and Gcf.get_all_fig_managers():\n matplotlib.pyplot.close('all')\n\n\n# This flag will be reset by draw_if_interactive when called\nshow._draw_called = False\n# list of figures to draw when flush_figures is called\nshow._to_draw = []\n\n\ndef draw_if_interactive():\n \"\"\"\n Is called after every pylab drawing command\n \"\"\"\n # signal that the current active figure should be sent at the end of\n # execution. Also sets the _draw_called flag, signaling that there will be\n # something to send. At the end of the code execution, a separate call to\n # flush_figures() will act upon these values\n manager = Gcf.get_active()\n if manager is None:\n return\n fig = manager.canvas.figure\n\n # Hack: matplotlib FigureManager objects in interacive backends (at least\n # in some of them) monkeypatch the figure object and add a .show() method\n # to it. 
This applies the same monkeypatch in order to support user code\n # that might expect `.show()` to be part of the official API of figure\n # objects.\n # For further reference:\n # https://github.com/ipython/ipython/issues/1612\n # https://github.com/matplotlib/matplotlib/issues/835\n\n if not hasattr(fig, 'show'):\n # Queue up `fig` for display\n fig.show = lambda *a: display(fig)\n\n # If matplotlib was manually set to non-interactive mode, this function\n # should be a no-op (otherwise we'll generate duplicate plots, since a user\n # who set ioff() manually expects to make separate draw/show calls).\n if not matplotlib.is_interactive():\n return\n\n # ensure current figure will be drawn, and each subsequent call\n # of draw_if_interactive() moves the active figure to ensure it is\n # drawn last\n try:\n show._to_draw.remove(fig)\n except ValueError:\n # ensure it only appears in the draw list once\n pass\n # Queue up the figure for drawing in next show() call\n show._to_draw.append(fig)\n show._draw_called = True\n\n\ndef flush_figures():\n \"\"\"Send all figures that changed\n\n This is meant to be called automatically and will call show() if, during\n prior code execution, there had been any calls to draw_if_interactive.\n\n This function is meant to be used as a post_execute callback in IPython,\n so user-caused errors are handled with showtraceback() instead of being\n allowed to raise. If this function is not called from within IPython,\n then these exceptions will raise.\n \"\"\"\n if not show._draw_called:\n return\n\n try:\n # exclude any figures that were closed:\n active = set([fm.canvas.figure for fm in Gcf.get_all_fig_managers()])\n for fig in [fig for fig in show._to_draw if fig in active]:\n display(fig)\n finally:\n # clear flags for next round\n show._to_draw = []\n show._draw_called = False\n\n\n# Changes to matplotlib in version 1.2 requires a mpl backend to supply a\n# default figurecanvas. 
This is set here to a Agg canvas\n# See https://github.com/matplotlib/matplotlib/pull/1125\nFigureCanvas = FigureCanvasAgg\n" ]
[ [ "matplotlib.pyplot.close", "matplotlib._pylab_helpers.Gcf.get_all_fig_managers", "matplotlib.is_interactive", "matplotlib._pylab_helpers.Gcf.get_active" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MikeD89/Advent2020
[ "84dd3723ceefb69fea45bc53d4116649fd58ff47" ]
[ "code/day17.py" ]
[ "from utils import utils\nimport numpy as np\nfrom scipy.signal import convolve\n\nday = 17\n\ntD = \"\"\"\n.#.\n..#\n###\n\"\"\"\n\nactive = '#'\ninactive = '.'\n\n\nclass DimentionalConway:\n # The game of seats\n def __init__(self, data, dimensions):\n self.dimensions = dimensions\n\n # Load the data into numpy\n def parse(x): return x == active\n self.data = np.array([list(d) for d in data])\n self.data = parse(self.data).astype(int)\n\n # already 2d, expand to N dimensions\n for _ in range(dimensions - 2):\n self.data = np.expand_dims(self.data, 0)\n\n # create a kernel based on N dimensions - make us have no weight\n self.kernel = np.ones((3, )*dimensions, dtype=int)\n self.kernel[(1, )*dimensions] = 0\n\n def cycle(self):\n # Add room for our data to grow\n self.data = np.pad(self.data, pad_width=1, mode='constant', constant_values=False)\n\n # Convolve our kernal\n neighbors = convolve(self.data, self.kernel, mode='same')\n\n # Work our which to set active and inactive before changing the data\n neighbour_check = np.logical_or(neighbors < 2, neighbors > 3)\n active = np.logical_and(self.data == 0, neighbors == 3)\n inactive = np.logical_and(self.data == 1, neighbour_check)\n\n # Set up data!\n self.data[active] = 1\n self.data[inactive] = 0\n\n def count(self):\n return np.sum(self.data)\n\n def iterate(self, times):\n for _ in range(times):\n self.cycle()\n return self.count()\n\n\ndef process_data(data):\n # data = utils.load_test_data(tD)\n return data\n\n\ndef partOne(data):\n game = DimentionalConway(data, 3)\n game.iterate(6)\n return game.count()\n\n\ndef partTwo(data):\n game = DimentionalConway(data, 4)\n game.iterate(6)\n return game.count()\n\n\nif __name__ == \"__main__\":\n utils.run(day, process_data, None, partOne, partTwo)\n" ]
[ [ "numpy.expand_dims", "numpy.pad", "numpy.ones", "numpy.logical_or", "numpy.logical_and", "scipy.signal.convolve", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
segalon/SpectralNet-pytorch
[ "a9077f6d71e64e3ba9c215fd2edeb743359b18f4" ]
[ "spectral_net/network.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n\ndef orthonorm(Q, eps=1e-7):\n m = torch.tensor(Q.shape[0]) # batch size\n outer_prod = torch.mm(Q.T, Q)\n outer_prod = outer_prod + eps * torch.eye(outer_prod.shape[0])\n\n L = torch.linalg.cholesky(outer_prod) # lower triangular\n L_inv = torch.linalg.inv(L)\n return torch.sqrt(m) * L_inv.T\n\n\nclass NetOrtho(nn.Module):\n def __init__(self, params):\n super(NetOrtho, self).__init__()\n self.params = params\n\n input_sz = params['input_sz']\n n_hidden_1 = params['n_hidden_1']\n n_hidden_2 = params['n_hidden_2']\n k = params['k']\n\n self.fc1 = nn.Linear(input_sz, n_hidden_1)\n self.fc2 = nn.Linear(n_hidden_1, n_hidden_1)\n self.fc3 = nn.Linear(n_hidden_1, n_hidden_2)\n self.fc4 = nn.Linear(n_hidden_2, k)\n\n self.A = torch.rand(k,k)\n self.A.requires_grad = False\n\n\n def forward(self, x, ortho_step=False):\n self.A.requires_grad = False\n if ortho_step:\n with torch.no_grad():\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n Y_tilde = torch.tanh(self.fc4(x))\n\n self.A = orthonorm(Y_tilde, eps=self.params['epsilon'])\n self.A.requires_grad = False\n\n # for debugging\n Y = torch.mm(Y_tilde, self.A)\n res = (1/Y.shape[0]) * torch.mm(Y.T, Y)\n return res\n\n else:\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n Y_tilde = torch.tanh(self.fc4(x))\n # need to multiply from the right, not from the left\n Y = torch.mm(Y_tilde, self.A)\n return Y\n\n" ]
[ [ "torch.linalg.inv", "torch.linalg.cholesky", "torch.mm", "torch.sqrt", "torch.eye", "torch.tensor", "torch.nn.Linear", "torch.no_grad", "torch.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AGOberprieler/GinJinn2
[ "527feac125f476165e332277823c11016565f99d" ]
[ "ginjinn/segmentation_refinement/eval_helper.py" ]
[ "import torch\nimport torch.nn.functional as F\n\ndef resize_max_side(im, size, method):\n h, w = im.shape[-2:]\n max_side = max(h, w)\n ratio = size / max_side\n if method in ['bilinear', 'bicubic']:\n return F.interpolate(im, scale_factor=ratio, mode=method, align_corners=False, recompute_scale_factor=True)\n else:\n return F.interpolate(im, scale_factor=ratio, mode=method, recompute_scale_factor=True)\n\ndef safe_forward(model, im, seg, inter_s8=None, inter_s4=None):\n \"\"\"\n Slightly pads the input image such that its length is a multiple of 8\n \"\"\"\n b, _, ph, pw = seg.shape\n if (ph % 8 != 0) or (pw % 8 != 0):\n newH = ((ph//8+1)*8)\n newW = ((pw//8+1)*8)\n p_im = torch.zeros(b, 3, newH, newW, device=im.device)\n p_seg = torch.zeros(b, 1, newH, newW, device=im.device) - 1\n\n p_im[:,:,0:ph,0:pw] = im\n p_seg[:,:,0:ph,0:pw] = seg\n im = p_im\n seg = p_seg\n\n if inter_s8 is not None:\n p_inter_s8 = torch.zeros(b, 1, newH, newW, device=im.device) - 1\n p_inter_s8[:,:,0:ph,0:pw] = inter_s8\n inter_s8 = p_inter_s8\n if inter_s4 is not None:\n p_inter_s4 = torch.zeros(b, 1, newH, newW, device=im.device) - 1\n p_inter_s4[:,:,0:ph,0:pw] = inter_s4\n inter_s4 = p_inter_s4\n\n images = model(im, seg, inter_s8, inter_s4)\n return_im = {}\n\n for key in ['pred_224', 'pred_28_3', 'pred_56_2']:\n return_im[key] = images[key][:,:,0:ph,0:pw]\n del images\n\n return return_im\n\ndef process_high_res_im(model, im, seg, L=900):\n\n stride = L//2\n\n _, _, h, w = seg.shape\n\n \"\"\"\n Global Step\n \"\"\"\n if max(h, w) > L:\n im_small = resize_max_side(im, L, 'area')\n seg_small = resize_max_side(seg, L, 'area')\n elif max(h, w) < L:\n im_small = resize_max_side(im, L, 'bicubic')\n seg_small = resize_max_side(seg, L, 'bilinear')\n else:\n im_small = im\n seg_small = seg\n\n images = safe_forward(model, im_small, seg_small)\n\n pred_224 = images['pred_224']\n pred_56 = images['pred_56_2']\n \n \"\"\"\n Local step\n \"\"\"\n\n for new_size in [max(h, w)]:\n im_small = 
resize_max_side(im, new_size, 'area')\n seg_small = resize_max_side(seg, new_size, 'area')\n _, _, h, w = seg_small.shape\n\n combined_224 = torch.zeros_like(seg_small)\n combined_weight = torch.zeros_like(seg_small)\n\n r_pred_224 = (F.interpolate(pred_224, size=(h, w), mode='bilinear', align_corners=False, recompute_scale_factor=True)>0.5).float()*2-1\n r_pred_56 = F.interpolate(pred_56, size=(h, w), mode='bilinear', align_corners=False, recompute_scale_factor=True)*2-1\n\n padding = 16\n step_size = stride - padding*2\n step_len = L\n\n used_start_idx = {}\n for x_idx in range((w)//step_size+1):\n for y_idx in range((h)//step_size+1):\n\n start_x = x_idx * step_size\n start_y = y_idx * step_size\n end_x = start_x + step_len\n end_y = start_y + step_len\n\n # Shift when required\n if end_y > h:\n end_y = h\n start_y = h - step_len\n if end_x > w:\n end_x = w\n start_x = w - step_len\n\n # Bound x/y range\n start_x = max(0, start_x)\n start_y = max(0, start_y)\n end_x = min(w, end_x)\n end_y = min(h, end_y)\n\n # The same crop might appear twice due to bounding/shifting\n start_idx = start_y*w + start_x\n if start_idx in used_start_idx:\n continue\n else:\n used_start_idx[start_idx] = True\n \n # Take crop\n im_part = im_small[:,:,start_y:end_y, start_x:end_x]\n seg_224_part = r_pred_224[:,:,start_y:end_y, start_x:end_x]\n seg_56_part = r_pred_56[:,:,start_y:end_y, start_x:end_x]\n\n # Skip when it is not an interesting crop anyway\n seg_part_norm = (seg_224_part>0).float()\n high_thres = 0.9\n low_thres = 0.1\n if (seg_part_norm.mean() > high_thres) or (seg_part_norm.mean() < low_thres):\n continue\n grid_images = safe_forward(model, im_part, seg_224_part, seg_56_part)\n grid_pred_224 = grid_images['pred_224']\n\n # Padding\n pred_sx = pred_sy = 0\n pred_ex = step_len\n pred_ey = step_len\n\n if start_x != 0:\n start_x += padding\n pred_sx += padding\n if start_y != 0:\n start_y += padding\n pred_sy += padding\n if end_x != w:\n end_x -= padding\n pred_ex -= 
padding\n if end_y != h:\n end_y -= padding\n pred_ey -= padding\n\n combined_224[:,:,start_y:end_y, start_x:end_x] += grid_pred_224[:,:,pred_sy:pred_ey,pred_sx:pred_ex]\n\n del grid_pred_224\n\n # Used for averaging\n combined_weight[:,:,start_y:end_y, start_x:end_x] += 1\n\n # Final full resolution output\n seg_norm = (r_pred_224/2+0.5)\n pred_224 = combined_224 / combined_weight\n pred_224 = torch.where(combined_weight==0, seg_norm, pred_224)\n\n _, _, h, w = seg.shape\n images = {}\n images['pred_224'] = F.interpolate(pred_224, size=(h, w), mode='bilinear', align_corners=True, recompute_scale_factor=True)\n\n return images['pred_224']\n\n\ndef process_im_single_pass(model, im, seg, L=900):\n \"\"\"\n A single pass version, aka global step only.\n \"\"\"\n\n _, _, h, w = im.shape\n if max(h, w) < L:\n im = resize_max_side(im, L, 'bicubic')\n seg = resize_max_side(seg, L, 'bilinear')\n\n if max(h, w) > L:\n im = resize_max_side(im, L, 'area')\n seg = resize_max_side(seg, L, 'area')\n\n images = safe_forward(model, im, seg)\n\n if max(h, w) < L:\n images['pred_224'] = F.interpolate(images['pred_224'], size=(h, w), mode='area', recompute_scale_factor=True)\n elif max(h, w) > L:\n images['pred_224'] = F.interpolate(images['pred_224'], size=(h, w), mode='bilinear', align_corners=True, recompute_scale_factor=True)\n\n return images['pred_224']\n" ]
[ [ "torch.where", "torch.zeros_like", "torch.nn.functional.interpolate", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
philmcp/TheRemoteFreelancer
[ "7009eb0fc2c3c86dbba8c4f00a6133c35d21f016" ]
[ "docs/scripts/sites_history_script.py" ]
[ "from pathlib import Path\n\nimport pandas\nimport pydriller\nfrom git import Repo\nimport io\n\nrepo_path = Path(__file__).parent.parent.parent\nsite_data = repo_path / \"docs\" / \"_data\" / \"sites.csv\"\nrepo = Repo(str(repo_path))\ndf = None\n\n\ndef get_change_hashes():\n for commit in pydriller.RepositoryMining(str(repo_path), filepath='docs/_data/sites.csv').traverse_commits():\n yield commit.hash, commit.author_date\n\n\nfor hash, date in get_change_hashes():\n commit = repo.commit(hash)\n try:\n target_file = commit.tree / \"docs\" / \"_data\" / \"sites.csv\"\n except KeyError:\n continue\n with io.BytesIO(target_file.data_stream.read()) as f:\n new_df = pandas.read_csv(f)\n new_df['date'] = date\n if df is None:\n df = new_df\n else:\n df = pandas.concat([df, new_df])\n\ndf.to_csv(\"ranking_history.csv\")\n" ]
[ [ "pandas.concat", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
RashidLadj/OpenSfM
[ "a1b611a8c8056791f5e0e250ebd9f736fb9eda86" ]
[ "opensfm/large/tools.py" ]
[ "import cv2\nimport itertools\nimport logging\nimport networkx as nx\nimport numpy as np\nimport scipy.spatial as spatial\n\nfrom collections import namedtuple\nfrom networkx.algorithms import bipartite\nfrom functools import lru_cache\n\nfrom opensfm import align\nfrom opensfm import context\nfrom opensfm import pybundle\nfrom opensfm import dataset\nfrom opensfm import geo\nfrom opensfm import reconstruction\nfrom opensfm import multiview\n\nlogger = logging.getLogger(__name__)\n\n\nPartialReconstruction = namedtuple(\"PartialReconstruction\", [\"submodel_path\", \"index\"])\n\n\ndef kmeans(samples, nclusters, max_iter=100, attempts=20):\n criteria = (cv2.TERM_CRITERIA_MAX_ITER, max_iter, 1.0)\n flags = cv2.KMEANS_PP_CENTERS\n\n if context.OPENCV3:\n return cv2.kmeans(samples, nclusters, None, criteria, attempts, flags)\n else:\n return cv2.kmeans(samples, nclusters, criteria, attempts, flags)\n\n\ndef add_cluster_neighbors(positions, labels, centers, max_distance):\n reflla = np.mean(positions, 0)\n reference = geo.TopocentricConverter(reflla[0], reflla[1], 0)\n\n topocentrics = []\n for position in positions:\n x, y, z = reference.to_topocentric(position[0], position[1], 0)\n topocentrics.append([x, y])\n\n topocentrics = np.array(topocentrics)\n topo_tree = spatial.cKDTree(topocentrics)\n\n clusters = []\n for label in np.arange(centers.shape[0]):\n cluster_indices = np.where(labels == label)[0]\n\n neighbors = []\n for i in cluster_indices:\n neighbors.extend(\n topo_tree.query_ball_point(topocentrics[i], max_distance))\n\n cluster = list(np.union1d(cluster_indices, neighbors))\n clusters.append(cluster)\n\n return clusters\n\n\ndef connected_reconstructions(reconstruction_shots):\n g = nx.Graph()\n for r in reconstruction_shots:\n g.add_node(r, bipartite=0)\n for shot_id in reconstruction_shots[r]:\n g.add_node(shot_id, bipartite=1)\n g.add_edge(r, shot_id)\n\n p = bipartite.projected_graph(g, reconstruction_shots.keys())\n\n return p.edges()\n\n\ndef 
scale_matrix(covariance):\n try:\n L = np.linalg.cholesky(covariance)\n except Exception as e:\n logger.error(\n 'Could not compute Cholesky of covariance matrix {}'\n .format(covariance))\n\n d = np.diag(np.diag(covariance).clip(1e-8, None))\n L = np.linalg.cholesky(d)\n\n return np.linalg.inv(L)\n\n\ndef invert_similarity(s, A, b):\n s_inv = 1 / s\n A_inv = A.T\n b_inv = -s_inv * A_inv.dot(b)\n\n return s_inv, A_inv, b_inv\n\n\ndef partial_reconstruction_name(key):\n return str(key.submodel_path) + \"_index\" + str(key.index)\n\n\ndef add_camera_constraints_soft(ra, reconstruction_shots, reconstruction_name):\n added_shots = set()\n for key in reconstruction_shots:\n shots = reconstruction_shots[key]\n rec_name = reconstruction_name(key)\n ra.add_reconstruction(rec_name, 0, 0, 0, 0, 0, 0, 1, False)\n for shot_id in shots:\n shot = shots[shot_id]\n shot_name = str(shot_id)\n\n R = shot.pose.rotation\n t = shot.pose.translation\n\n if shot_id not in added_shots:\n ra.add_shot(shot_name, R[0], R[1], R[2],\n t[0], t[1], t[2], False)\n\n gps = shot.metadata.gps_position.value\n gps_sd = shot.metadata.gps_accuracy.value\n\n ra.add_absolute_position_constraint(\n shot_name, gps[0], gps[1], gps[2], gps_sd)\n\n added_shots.add(shot_id)\n\n covariance = np.diag([1e-5, 1e-5, 1e-5, 1e-2, 1e-2, 1e-2])\n sm = scale_matrix(covariance)\n rmc = pybundle.RARelativeMotionConstraint(\n rec_name, shot_name, R[0], R[1], R[2], t[0], t[1], t[2])\n\n for i in range(6):\n for j in range(6):\n rmc.set_scale_matrix(i, j, sm[i, j])\n\n ra.add_relative_motion_constraint(rmc)\n\n\ndef add_camera_constraints_hard(ra, reconstruction_shots,\n reconstruction_name,\n add_common_camera_constraint):\n for key in reconstruction_shots:\n shots = reconstruction_shots[key]\n rec_name = reconstruction_name(key)\n ra.add_reconstruction(rec_name, 0, 0, 0, 0, 0, 0, 1, False)\n for shot_id in shots:\n shot = shots[shot_id]\n shot_name = rec_name + str(shot_id)\n\n R = shot.pose.rotation\n t = 
shot.pose.translation\n ra.add_shot(shot_name, R[0], R[1], R[2],\n t[0], t[1], t[2], True)\n\n gps = shot.metadata.gps_position.value\n gps_sd = shot.metadata.gps_accuracy.value\n ra.add_relative_absolute_position_constraint(\n rec_name, shot_name, gps[0], gps[1], gps[2], gps_sd)\n\n if add_common_camera_constraint:\n connections = connected_reconstructions(reconstruction_shots)\n for connection in connections:\n rec_name1 = reconstruction_name(connection[0])\n rec_name2 = reconstruction_name(connection[1])\n\n shots1 = reconstruction_shots[connection[0]]\n shots2 = reconstruction_shots[connection[1]]\n\n common_images = set(shots1.keys()).intersection(shots2.keys())\n for image in common_images:\n ra.add_common_camera_constraint(rec_name1, rec_name1 +\n str(image),\n rec_name2, rec_name2 +\n str(image),\n 1, 0.1)\n@lru_cache(25)\ndef load_reconstruction(path, index):\n d1 = dataset.DataSet(path)\n r1 = d1.load_reconstruction()[index]\n g1 = d1.load_tracks_manager()\n return (path + (\"_%s\" % index)), (r1, g1)\n\n\ndef add_point_constraints(ra, reconstruction_shots, reconstruction_name):\n connections = connected_reconstructions(reconstruction_shots)\n for connection in connections:\n\n i1, (r1, g1) = load_reconstruction(\n connection[0].submodel_path, connection[0].index)\n i2, (r2, g2) = load_reconstruction(\n connection[1].submodel_path, connection[1].index)\n\n rec_name1 = reconstruction_name(connection[0])\n rec_name2 = reconstruction_name(connection[1])\n\n scale_treshold = 1.3\n treshold_in_meter = 0.3\n minimum_inliers = 20\n status, T, inliers = reconstruction.resect_reconstruction(\n r1, r2, g1, g2, treshold_in_meter, minimum_inliers)\n if not status:\n continue\n\n s, R, t = multiview.decompose_similarity_transform(T)\n if s > scale_treshold or s < (1.0/scale_treshold) or \\\n len(inliers) < minimum_inliers:\n continue\n\n for t1, t2 in inliers:\n c1 = r1.points[t1].coordinates\n c2 = r2.points[t2].coordinates\n\n ra.add_common_point_constraint(\n 
rec_name1, c1[0], c1[1], c1[2],\n rec_name2, c2[0], c2[1], c2[2],\n 1e-1)\n\n\ndef load_reconstruction_shots(meta_data):\n reconstruction_shots = {}\n for submodel_path in meta_data.get_submodel_paths():\n data = dataset.DataSet(submodel_path)\n if not data.reconstruction_exists():\n continue\n\n reconstruction = data.load_reconstruction()\n for index, partial_reconstruction in enumerate(reconstruction):\n key = PartialReconstruction(submodel_path, index)\n reconstruction_shots[key] = partial_reconstruction.shots\n\n return reconstruction_shots\n\n\ndef align_reconstructions(reconstruction_shots,\n reconstruction_name,\n use_points_constraints,\n camera_constraint_type='soft_camera_constraint'):\n ra = pybundle.ReconstructionAlignment()\n\n if camera_constraint_type is 'soft_camera_constraint':\n add_camera_constraints_soft(ra, reconstruction_shots,\n reconstruction_name)\n if camera_constraint_type is 'hard_camera_constraint':\n add_camera_constraints_hard(ra, reconstruction_shots,\n reconstruction_name, True)\n if use_points_constraints:\n add_point_constraints(ra, reconstruction_shots, reconstruction_name)\n\n logger.info(\"Running alignment\")\n ra.run()\n logger.info(ra.brief_report())\n\n transformations = {}\n for key in reconstruction_shots:\n rec_name = reconstruction_name(key)\n r = ra.get_reconstruction(rec_name)\n s = r.scale\n A = cv2.Rodrigues(np.array([r.rx, r.ry, r.rz]))[0]\n b = np.array([r.tx, r.ty, r.tz])\n transformations[key] = invert_similarity(s, A, b)\n\n return transformations\n\n\ndef apply_transformations(transformations):\n submodels = itertools.groupby(sorted(transformations.keys(), key=lambda key: key.submodel_path), lambda key: key.submodel_path)\n for submodel_path, keys in submodels:\n data = dataset.DataSet(submodel_path)\n if not data.reconstruction_exists():\n continue\n\n reconstruction = data.load_reconstruction()\n for key in keys:\n partial_reconstruction = reconstruction[key.index]\n s, A, b = transformations[key]\n 
align.apply_similarity(partial_reconstruction, s, A, b)\n\n data.save_reconstruction(reconstruction, 'reconstruction.aligned.json')\n" ]
[ [ "numpy.diag", "numpy.linalg.inv", "numpy.arange", "numpy.union1d", "numpy.mean", "numpy.linalg.cholesky", "numpy.array", "numpy.where", "scipy.spatial.cKDTree" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yellowpsyduck/OccamFusionnet
[ "bafdda07939c6370792c5db2b50bca27729804e2" ]
[ "occam/layers.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n# Origin: https://github.com/facebookresearch/ParlAI/tree/master/parlai/agents/drqa\n\nimport sys\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom torch.autograd.function import InplaceFunction\nfrom oldsru import SRUCell\n\n\n# ------------------------------------------------------------------------------\n# Modules\n# ------------------------------------------------------------------------------\n\ndef variational_dropout(x, p=0, training=False):\n \"\"\"\n x: batch * len * input_size\n \"\"\"\n if training == False or p == 0:\n return x\n dropout_mask = Variable(1.0 / (1-p) * torch.bernoulli((1-p) * (x.data.new(x.size(0), x.size(2)).zero_() + 1)), requires_grad=False)\n return dropout_mask.unsqueeze(1).expand_as(x) * x\n\n\ndef dropout(x, p=0, training=False, variational=False):\n \"\"\"\n x: (batch * len * input_size) or (any other shape)\n \"\"\"\n if p > 0:\n if variational and len(x.size()) == 3: # if x is (batch * len * input_size)\n return variational_dropout(x, p=p, training=training)\n else:\n return F.dropout(x, p=p, training=training)\n else:\n return x\n\n\nclass SizeDropout(nn.Module):\n def __init__(self, input_size, min_size, dim, rescale=True):\n super().__init__()\n self.min_size = min_size\n self.input_size = input_size\n self.dim = dim\n self.eval_size = input_size\n self.rescale = rescale\n if min_size < input_size:\n mask = torch.cat([torch.ones(min_size), torch.arange(input_size - min_size, 0, -1) / (input_size - min_size + 1)], dim=0) \n else:\n mask = torch.ones(input_size)\n self.register_buffer('mask', torch.ones(input_size))\n 
self.eval_mask = mask.cuda()\n self.train_size = input_size\n self.generate_mask(1)\n\n def sample_train_size(self):\n if self.input_size == self.min_size:\n return self.input_size\n self.train_size = self.min_size + min(int(torch.rand(1)[0] * (self.input_size - self.min_size + 1)), self.input_size) ## take the min in case of getting 1 from torch.rand\n return self.train_size\n\n def generate_mask(self, max_dim):\n curr_mask = self.mask.clone()\n if self.train_size < self.input_size:\n curr_mask[self.train_size:] = 0\n for i in range(self.dim):\n curr_mask.unsqueeze_(0)\n for i in range(self.dim+1, max_dim):\n curr_mask.unsqueeze_(-1)\n self.curr_mask_var = Variable(curr_mask, requires_grad=False)\n\n def generate_eval_mask(self, max_dim):\n if self.rescale:\n curr_mask = self.eval_mask.clone()\n else:\n curr_mask = torch.ones(self.input_size).cuda()\n if self.eval_size < self.input_size:\n curr_mask[self.eval_size:] = 0\n for i in range(self.dim):\n curr_mask.unsqueeze_(0)\n for i in range(self.dim+1, max_dim):\n curr_mask.unsqueeze_(-1)\n self.curr_eval_mask_var = Variable(curr_mask, requires_grad=False)\n\n def forward(self, x, resample=True, mask=None):\n assert x.size(self.dim) == self.input_size, 'x: {}, input_size: {}'.format(x.size(), self.input_size)\n if self.input_size == self.min_size:\n return x\n if self.training:\n if resample:\n self.sample_train_size()\n self.generate_mask(x.dim())\n elif isinstance(mask, Variable):\n self.curr_mask_var = mask\n elif x.dim() != self.curr_mask_var.dim() or type(x.data) != type(self.curr_mask_var.data):\n self.generate_mask(x.dim())\n x = x * self.curr_mask_var\n else:\n self.generate_eval_mask(x.dim())\n x = x * self.curr_eval_mask_var\n return x\n\n def __repr__(self):\n return '{}(input_size={}, min_size={}, dim={}, rescale={}, eval_size={})'.format(\n self.__class__.__name__, self.input_size, self.min_size, self.dim, self.rescale, self.eval_size)\n\n\nclass LayerNorm(nn.Module):\n def __init__(self, features, 
eps=1e-6):\n super().__init__()\n self.gamma = nn.Parameter(torch.ones(features))\n self.beta = nn.Parameter(torch.zeros(features))\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.gamma * (x - mean) / (std + self.eps) + self.beta\n\n\nclass LayerNormChannelFirst(nn.Module):\n '''Layer Norm implementation source: https://github.com/pytorch/pytorch/issues/1959'''\n def __init__(self, features, eps=1e-6):\n super().__init__()\n self.gamma = nn.Parameter(torch.ones(features).view(1, -1, 1))\n self.beta = nn.Parameter(torch.zeros(features).view(1, -1, 1))\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-2, keepdim=True)\n std = x.std(-2, keepdim=True)\n return self.gamma * (x - mean) / (std + self.eps) + self.beta\n\n\nclass StackedBRNN(nn.Module):\n RNN_TYPES = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN, 'sru': SRUCell}\n SRU_TYPES = {'sru', 'sru-v2'}\n\n def __init__(self, input_size, hidden_size, num_layers,\n dropout_rate=0, dropout_output=False, rnn_type=nn.LSTM,\n variational_dropout=True,\n residual=False,\n squeeze_excitation=0,\n sd_min_size=0, sd_rescale=True,\n concat_layers=False, padding=False):\n super(StackedBRNN, self).__init__()\n self.padding = padding\n self.dropout_output = dropout_output\n self.dropout_rate = dropout_rate\n self.variational_dropout = variational_dropout\n self.num_layers = num_layers\n self.residual = residual\n self.squeeze_excitation = squeeze_excitation\n self.concat_layers = concat_layers\n self.sd_min_size = sd_min_size\n self.sd_rescale = sd_rescale\n self.rnns = nn.ModuleList()\n self.rnn_type = rnn_type\n for i in range(num_layers):\n input_size = input_size if i == 0 else 2 * hidden_size\n if rnn_type in self.SRU_TYPES:\n self.rnns.append(self.RNN_TYPES[rnn_type](input_size, hidden_size,\n dropout=dropout_rate,\n rnn_dropout=dropout_rate,\n use_tanh=1,\n bidirectional=True))\n else:\n 
self.rnns.append(self.RNN_TYPES[rnn_type](input_size, hidden_size,\n num_layers=1,\n bidirectional=True))\n if sd_min_size > 0:\n self.sds = nn.ModuleList()\n for i in range(num_layers):\n self.sds.append(SizeDropout(hidden_size, sd_min_size, 3, sd_rescale))\n if squeeze_excitation > 0:\n self.ses = nn.ModuleList()\n for i in range(num_layers):\n self.ses.append(nn.Sequential(nn.Linear(hidden_size*2, hidden_size*2//self.squeeze_excitation),\n nn.ReLU(inplace=True),\n nn.Linear(hidden_size*2//self.squeeze_excitation, hidden_size*2),\n nn.Sigmoid()))\n\n def forward(self, x, x_mask):\n \"\"\"Can choose to either handle or ignore variable length sequences.\n Always handle padding in eval.\n \"\"\"\n # No padding necessary.\n if x_mask.data.sum() == 0:\n return self._forward_unpadded(x, x_mask)\n if self.padding and not self.rnn_type in self.SRU_TYPES:\n return self._forward_padded(x, x_mask)\n # We don't care.\n return self._forward_unpadded(x, x_mask)\n\n def _forward_unpadded(self, x, x_mask):\n \"\"\"Faster encoding that ignores any padding.\"\"\"\n # Transpose batch and sequence dims\n x = x.transpose(0, 1)\n lengths_var = Variable(x_mask.data.eq(0).long().sum(1).squeeze().float().unsqueeze(1), requires_grad=False)\n\n # Encode all layers\n outputs = [x]\n for i in range(self.num_layers):\n rnn_input = outputs[-1]\n\n # Apply dropout to hidden input\n if self.dropout_rate > 0 and self.rnn_type not in self.SRU_TYPES:\n rnn_input = dropout(rnn_input, p=self.dropout_rate, training=self.training,\n variational=self.variational_dropout)\n # Forward\n rnn_output = self.rnns[i](rnn_input)[0]\n if self.residual and rnn_output.size() == rnn_input.size():\n rnn_output = rnn_output + outputs[-1]\n\n if self.sd_min_size > 0:\n bs, l, hs = rnn_output.size()\n rnn_output = self.sds[i](rnn_output.view(bs, l, 2, hs//2)).view(bs, l, hs)\n\n if self.squeeze_excitation > 0:\n rnn_output = rnn_output * self.ses[i](rnn_output.sum(0) / lengths_var).unsqueeze(0)\n 
outputs.append(rnn_output)\n\n # Concat hidden layers\n if self.concat_layers:\n output = torch.cat(outputs[1:], 2)\n else:\n output = outputs[-1]\n\n # Transpose back\n output = output.transpose(0, 1)\n\n # Dropout on output layer\n if self.dropout_output and self.dropout_rate > 0:\n output = dropout(output, p=self.dropout_rate, training=self.training,\n variational=self.variational_dropout)\n return output\n\n def _forward_padded(self, x, x_mask):\n \"\"\"Slower (significantly), but more precise,\n encoding that handles padding.\"\"\"\n # Compute sorted sequence lengths\n lengths = x_mask.data.eq(0).long().sum(1).squeeze()\n _, idx_sort = torch.sort(lengths, dim=0, descending=True)\n _, idx_unsort = torch.sort(idx_sort, dim=0)\n\n lengths_var = Variable(lengths[idx_sort].float().unsqueeze(1), requires_grad=False)\n lengths = list(lengths[idx_sort])\n idx_sort = Variable(idx_sort)\n idx_unsort = Variable(idx_unsort)\n\n # Sort x\n x = x.index_select(0, idx_sort)\n\n # Transpose batch and sequence dims\n x = x.transpose(0, 1)\n\n # Encode all layers\n outputs = [x]\n for i in range(self.num_layers):\n rnn_input = outputs[-1]\n\n # Apply dropout to input\n if self.dropout_rate > 0:\n rnn_input = dropout(rnn_input, p=self.dropout_rate, training=self.training,\n variational=self.variational_dropout)\n rnn_input = nn.utils.rnn.pack_padded_sequence(rnn_input, lengths)\n\n rnn_output = self.rnns[i](rnn_input)[0]\n rnn_output = nn.utils.rnn.pad_packed_sequence(rnn_output)[0]\n\n if self.residual and rnn_output.size() == outputs[-1].size():\n rnn_output = rnn_output + outputs[-1]\n\n if self.sd_min_size > 0:\n bs, l, hs = rnn_output.size()\n rnn_output = self.sds[i](rnn_output.view(bs, l, 2, hs//2)).view(bs, l, hs)\n\n if self.squeeze_excitation > 0:\n rnn_output = rnn_output * self.ses[i](rnn_output.sum(0) / lengths_var).unsqueeze(0)\n outputs.append(rnn_output)\n\n # Concat hidden layers or take final\n if self.concat_layers:\n output = torch.cat(outputs[1:], 2)\n 
else:\n output = outputs[-1]\n\n # Transpose and unsort\n output = output.transpose(0, 1)\n output = output.index_select(0, idx_unsort)\n\n # Pad up to original batch sequence length\n if output.size(1) != x_mask.size(1):\n padding = torch.zeros(output.size(0),\n x_mask.size(1) - output.size(1),\n output.size(2)).type(output.data.type())\n output = torch.cat([output, Variable(padding)], 1)\n\n # Dropout on output layer\n if self.dropout_output and self.dropout_rate > 0:\n output = F.dropout(output,\n p=self.dropout_rate,\n training=self.training)\n return output\n\n\nclass DilatedResNet(nn.Module):\n \"\"\"Dilated ResNet with GRU to replace BRNN.\"\"\"\n\n def __init__(self, input_size, hidden_size, num_layers,\n dilation_layers=1, dilation_base=1, dilation_offset=0,\n input_padding=0, masked=True,\n growing_mode='block', # ['block', 'layer']\n block_type='dilated_conv', # ['dilated_conv', 'dilated_sep_conv', 'sep_conv']\n activation_type='glu', # ['glu', 'relu']\n dropout_rate=0, dropout_output=False):\n super(DilatedResNet, self).__init__()\n # self.padding = padding\n self.dropout_output = dropout_output\n self.dropout_rate = dropout_rate\n self.num_layers = num_layers\n self.input_padding = input_padding\n # self.concat_layers = concat_layers\n if activation_type == 'glu':\n self.reduce_block = nn.Sequential(\n nn.Conv1d(input_size, hidden_size*2, 3, padding=1 + input_padding),\n nn.GLU(dim=1))\n else:\n self.reduce_block = nn.Sequential(\n nn.Conv1d(input_size, hidden_size, 3, padding=1 + input_padding),\n nn.ReLU(inplace=True))\n self.cnns = nn.ModuleList()\n self.masked = masked\n assert num_layers % 2 == 1, 'num_layers=' + str(num_layers) +' is not odd'\n for i in range(num_layers // 2):\n if block_type == 'sep_conv':\n if growing_mode == 'block':\n kernel_size = 2 ** (i - dilation_offset + 2) - 1 if dilation_offset <= i < dilation_offset + dilation_layers else 3\n kernel_size = (kernel_size, kernel_size)\n elif growing_mode == 'layer':\n kernel_size = [1, 
1]\n kernel_size[0] = 2 ** (2*i+2-dilation_offset) - 1 if dilation_offset <= (2*i+1) < dilation_offset + dilation_layers else 3\n kernel_size[1] = 2 ** (2*i+3-dilation_offset) - 1 if dilation_offset <= (2*i+2) < dilation_offset + dilation_layers else 3\n else:\n raise NotImplementedError\n dilation = 1\n padding = (kernel_size[0] // 2, kernel_size[1] // 2)\n elif block_type in {'dilated_conv', 'dilated_sep_conv'}:\n if growing_mode == 'block':\n dilation = dilation_base ** (i - dilation_offset + 1) if dilation_offset <= i < dilation_offset + dilation_layers else 1\n elif growing_mode == 'layer':\n dilation = [1, 1]\n dilation[0] = dilation_base ** (2*i+1-dilation_offset) if dilation_offset <= (2*i+1) < dilation_offset + dilation_layers else 1\n dilation[1] = dilation_base ** (2*i+2-dilation_offset) if dilation_offset <= (2*i+2) < dilation_offset + dilation_layers else 1\n else:\n raise NotImplementedError\n padding = dilation\n kernel_size = 3\n else:\n raise NotImplementedError\n\n if block_type == 'dilated_conv':\n Block = GLUResBlock\n elif block_type in {'dilated_sep_conv', 'sep_conv'}:\n Block = GLUResBlock_sep\n else:\n raise NotImplementedError\n\n self.cnns.append(Block(hidden_size, hidden_size,\n hidden_size, kernel_size=kernel_size,\n padding=padding,\n dilation=dilation,\n dropout_rate=dropout_rate,\n activation_type=activation_type))\n\n def forward(self, x, x_mask=None):\n # swap filter dim and sequence dim\n if self.input_padding > 0 and self.masked and x_mask is not None:\n x_mask = F.pad(x_mask.unsqueeze(1).unsqueeze(2), (self.input_padding, self.input_padding, 0, 0), 'constant', True)[:, 0, 0, :]\n x = x.transpose(1, 2)\n if self.dropout_output and self.dropout_rate > 0:\n x = F.dropout(x, p=self.dropout_rate,\n training=self.training)\n x = self.reduce_block(x)\n for cnn in self.cnns:\n x = cnn(x, x_mask)\n\n # Dropout on output layer\n if self.dropout_output and self.dropout_rate > 0:\n x = F.dropout(x, p=self.dropout_rate,\n 
training=self.training)\n x = x.transpose(1, 2)\n return x.contiguous()\n\n\nclass GLUResBlock(nn.Module):\n '''GLU Res Block\n input -> drop1 -> conv1 -> GLU1 -> drop2 -> conv2 -> GLU2 --> residual\n add residual back to input\n '''\n def __init__(self, input_size, hidden_size, output_size, kernel_size=3,\n padding=1, groups=1, dilation=1, dropout_rate=0, activation_type='glu'):\n super(GLUResBlock, self).__init__()\n if type(dilation) is int:\n dilation = (dilation, dilation)\n if type(kernel_size) is int:\n kernel_size = (kernel_size, kernel_size)\n if type(padding) is int:\n padding = (padding, padding)\n\n self.dropout_rate = dropout_rate\n self.drop1 = nn.Dropout2d(dropout_rate)\n self.activation_type = activation_type\n if activation_type == 'glu':\n self.conv1 = nn.Conv1d(input_size, hidden_size*2, kernel_size[0],\n padding=padding[0], dilation=dilation[0])\n self.act1 = nn.GLU(dim=1)\n elif activation_type == 'relu':\n self.conv1 = nn.Conv1d(input_size, hidden_size, kernel_size[0],\n padding=padding[0], dilation=dilation[0])\n self.act1 = nn.ReLU(inplace=True)\n\n self.drop2 = nn.Dropout2d(dropout_rate)\n if activation_type == 'glu':\n self.conv2 = nn.Conv1d(hidden_size, output_size*2, kernel_size[1],\n padding=padding[1], dilation=dilation[1])\n self.act2 = nn.GLU(dim=1)\n elif activation_type == 'relu':\n self.conv2 = nn.Conv1d(hidden_size, output_size, kernel_size[1],\n padding=padding[1], dilation=dilation[1])\n self.act2 = nn.ReLU(inplace=True)\n\n def forward(self, x, x_mask=None, masked=True):\n res = x\n res = self.drop1(res.unsqueeze(3))[:, :, :, 0]\n res = self.act1(self.conv1(x))\n if masked and x_mask is not None:\n res.masked_fill_(x_mask.unsqueeze(1), 0)\n\n res = self.drop2(res.unsqueeze(3))[:, :, :, 0]\n res = self.act2(self.conv2(x))\n if masked and x_mask is not None:\n res.masked_fill_(x_mask.unsqueeze(1), 0)\n\n if x.size(1) == res.size(1):\n x = x + res\n elif x.size(1) > res.size(1):\n res = res + x[:, :res.size(1)]\n x = res\n 
else:\n x = x + res[:, :x.size(1)]\n return x\n\n\nclass GLUResBlock_sep(nn.Module):\n '''GLU Res Block\n input -> drop1 -> conv1 -> GLU1 -> drop2 -> conv2 -> GLU2 --> residual\n add residual back to input\n '''\n def __init__(self, input_size, hidden_size, output_size, kernel_size=3,\n padding=1, groups=1, dilation=1, dropout_rate=0, activation_type='glu'):\n super().__init__()\n if type(dilation) is int:\n dilation = (dilation, dilation)\n if type(kernel_size) is int:\n kernel_size = (kernel_size, kernel_size)\n if type(padding) is int:\n padding = (padding, padding)\n\n self.dropout_rate = dropout_rate\n self.drop1 = nn.Dropout2d(dropout_rate)\n self.activation_type = activation_type\n if activation_type == 'glu':\n self.conv1_1 = nn.Conv1d(input_size, input_size, kernel_size[0],\n groups=input_size, padding=padding[0], dilation=dilation[0])\n self.conv1_2 = nn.Conv1d(input_size, hidden_size*2, 1)\n self.act1 = nn.GLU(dim=1)\n elif activation_type == 'relu':\n self.conv1_1 = nn.Conv1d(input_size, input_size, kernel_size[0],\n groups=input_size, padding=padding[0], dilation=dilation[0])\n self.conv1_2 = nn.Conv1d(input_size, hidden_size, 1)\n self.act1 = nn.ReLU(inplace=True)\n\n self.drop2 = nn.Dropout2d(dropout_rate)\n if activation_type == 'glu':\n self.conv2_1 = nn.Conv1d(hidden_size, hidden_size, kernel_size[1],\n groups=hidden_size, padding=padding[1], dilation=dilation[1])\n self.conv2_2 = nn.Conv1d(hidden_size, output_size*2, 1)\n self.act2 = nn.GLU(dim=1)\n elif activation_type == 'relu':\n self.conv2_1 = nn.Conv1d(hidden_size, hidden_size, kernel_size[1],\n groups=hidden_size, padding=padding[1], dilation=dilation[1])\n self.conv2_2 = nn.Conv1d(hidden_size, output_size, 1)\n self.act2 = nn.ReLU(inplace=True)\n\n def forward(self, x, x_mask=None, masked=True):\n res = x\n res = self.drop1(res.unsqueeze(3)).squeeze(3)\n res = self.act1(self.conv1_2(self.conv1_1(x)))\n if masked and x_mask is not None:\n res.masked_fill_(x_mask.unsqueeze(1), 0)\n\n res = 
self.drop2(res.unsqueeze(3)).squeeze(3)\n res = self.act2(self.conv2_2(self.conv2_1(x)))\n if masked and x_mask is not None:\n res.masked_fill_(x_mask.unsqueeze(1), 0)\n\n if x.size(1) == res.size(1):\n x = x + res\n elif x.size(1) > res.size(1):\n res = res + x[:, :res.size(1)]\n x = res\n else:\n x = x + res[:, :x.size(1)]\n return x\n\nclass MLP(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers,\n dropout_rate=0, variational_dropout=True,\n concat_layers=False, output_act=True):\n super(MLP, self).__init__()\n self.dropout_rate = dropout_rate\n self.variational_dropout = variational_dropout\n self.num_layers = num_layers\n self.concat_layers = concat_layers\n self.linears = nn.ModuleList()\n self.output_act = output_act\n for i in range(num_layers):\n input_size = input_size if i == 0 else hidden_size\n self.linears.append(nn.Linear(input_size, hidden_size))\n\n def forward(self, x):\n original_size = x.size()\n if len(original_size) == 3:\n x = x.view(-1, original_size[2]).contiguous()\n\n hiddens = []\n for i in range(self.num_layers):\n if self.dropout_rate > 0.:\n x = dropout(x, p=self.dropout_rate, training=self.training,\n variational=self.variational_dropout)\n if i < self.num_layers - 1 or self.output_act:\n x = F.relu(self.linears[i](x), inplace=True)\n hiddens.append(x)\n \n if self.concat_layers:\n x = torch.cat(hiddens, 2)\n\n if len(original_size) == 3:\n x = x.view(original_size[0], original_size[1], -1).contiguous()\n return x\n\n\nclass SeqAttnMatch(nn.Module):\n \"\"\"Given sequences X and Y, match sequence Y to each element in X.\n * o_i = sum(alpha_j * y_j) for i in X\n * alpha_j = softmax(y_j * x_i)\n \"\"\"\n def __init__(self, input_size, hidden_size=None, identity=False, dropout=0., variational_dropout=False):\n super(SeqAttnMatch, self).__init__()\n self.dropout = dropout\n self.variational_dropout = variational_dropout\n if hidden_size is None:\n hidden_size = input_size\n if not identity:\n self.linear = 
nn.Linear(input_size, hidden_size)\n else:\n self.linear = None\n\n def forward(self, x, y, y_mask, scores_hook=None):\n \"\"\"Input shapes:\n x = batch * len1 * h\n y = batch * len2 * h\n y_mask = batch * len2\n Output shapes:\n matched_seq = batch * len1 * h\n \"\"\"\n if y.size(0) == 1 and x.size(0) > 1:\n y = y.repeat(x.size(0), 1, 1)\n y_mask = y_mask.repeat(x.size(0), 1)\n elif x.size(0) == 1 and y.size(0) > 1:\n x = x.repeat(y.size(0), 1, 1)\n # Project vectors\n if self.linear is not None:\n batch_size = x.size(0)\n len1 = x.size(1)\n len2 = y.size(1)\n x = dropout(x, p=self.dropout, training=self.training, variational=self.variational_dropout)\n y = dropout(y, p=self.dropout, training=self.training, variational=self.variational_dropout)\n x_proj = self.linear(x.view(-1, x.size(2))).view(batch_size, len1, -1)\n x_proj = F.relu(x_proj)\n y_proj = self.linear(y.view(-1, y.size(2))).view(batch_size, len2, -1)\n y_proj = F.relu(y_proj)\n else:\n x_proj = x\n y_proj = y\n\n # Compute scores\n scores = x_proj.bmm(y_proj.transpose(2, 1))\n if scores_hook is not None:\n scores = scores_hook(scores)\n\n # Mask padding\n y_mask = y_mask.unsqueeze(1).expand(scores.size())\n scores.data.masked_fill_(y_mask.data, -float('inf'))\n\n # Normalize with softmax\n alpha_flat = F.softmax(scores.view(-1, y.size(1)), dim=1)\n alpha = alpha_flat.view(-1, x.size(1), y.size(1))\n\n # Take weighted average\n matched_seq = alpha.bmm(y)\n return matched_seq\n\n\nclass BilinearSeqAttn(nn.Module):\n \"\"\"A bilinear attention layer over a sequence X w.r.t y:\n * o_i = softmax(x_i'Wy) for x_i in X.\n\n Optionally don't normalize output weights.\n \"\"\"\n def __init__(self, x_size, y_size, identity=False):\n super(BilinearSeqAttn, self).__init__()\n if not identity:\n self.linear = nn.Linear(y_size, x_size)\n else:\n self.linear = None\n\n def forward(self, x, y, x_mask, log=False, logit=False):\n \"\"\"\n x = batch * len * h1\n y = batch * h2\n x_mask = batch * len\n \"\"\"\n if 
y.size(0) == 1 and x.size(0) > 1:\n y = y.repeat(x.size(0), 1)\n elif x.size(0) == 1 and y.size(0) > 1:\n x = x.repeat(y.size(0), 1, 1)\n x_mask = x_mask.repeat(y.size(0), 1)\n\n Wy = self.linear(y) if self.linear is not None else y\n xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)\n xWy.data.masked_fill_(x_mask.data, -float('inf'))\n if logit:\n return xWy\n elif log:\n # In training we output log-softmax for NLL\n alpha = F.log_softmax(xWy, dim=1)\n else:\n # ...Otherwise 0-1 probabilities\n alpha = F.softmax(xWy, dim=1)\n return alpha\n\n\nclass LinearSeqAttn(nn.Module):\n \"\"\"Self attention over a sequence:\n * o_i = softmax(Wx_i) for x_i in X.\n \"\"\"\n def __init__(self, input_size):\n super(LinearSeqAttn, self).__init__()\n self.linear = nn.Linear(input_size, 1)\n\n def forward(self, x, x_mask, log=False):\n \"\"\"\n x = batch * len * hdim\n x_mask = batch * len\n \"\"\"\n x_flat = x.contiguous().view(-1, x.size(-1))\n scores = self.linear(x_flat).view(x.size(0), x.size(1))\n scores.data.masked_fill_(x_mask.data, -float('inf'))\n if log:\n alpha = F.log_softmax(scores, dim=1)\n else:\n alpha = F.softmax(scores, dim=1)\n return alpha\n\n\nclass RNNEncoder(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers,\n dropout_rate=0, dropout_output=False, rnn_type=nn.LSTM,\n variational_dropout=True, aux_size=0):\n super(RNNEncoder, self).__init__()\n self.variational_dropout = variational_dropout\n self.dropout_rate = dropout_rate\n self.num_layers = num_layers\n self.rnns = nn.ModuleList()\n for i in range(num_layers):\n input_size_ = (input_size + 2 * hidden_size * i)\n if i == 0: input_size_ += aux_size\n self.rnns.append(rnn_type(input_size_, hidden_size, num_layers=1, bidirectional=True))\n\n def forward(self, x, x_mask, aux_input=None):\n # Transpose batch and sequence dims\n x = x.transpose(0, 1)\n if aux_input is not None:\n aux_input = aux_input.transpose(0, 1)\n\n # Encode all layers\n hiddens = [x]\n for i in range(self.num_layers):\n rnn_input 
= torch.cat(hiddens, 2)\n if i == 0 and aux_input is not None:\n rnn_input = torch.cat([rnn_input, aux_input], 2)\n\n # Apply dropout to input\n if self.dropout_rate > 0:\n rnn_input = dropout(rnn_input, p=self.dropout_rate, training=self.training,\n variational=self.variational_dropout)\n # Forward\n rnn_output = self.rnns[i](rnn_input)[0]\n hiddens.append(rnn_output)\n\n # Transpose back\n hiddens = [h.transpose(0, 1) for h in hiddens]\n return hiddens[1:]\n\n\nclass MTLSTM(nn.Module):\n def __init__(self, opt, embedding=None, padding_idx=0, with_emb=True):\n \"\"\"Initialize an MTLSTM\n\n Arguments:\n embedding (Float Tensor): If not None, initialize embedding matrix with specified embedding vectors\n \"\"\"\n super(MTLSTM, self).__init__()\n\n self.embedding = nn.Embedding(opt['vocab_size'], opt['embedding_dim'], padding_idx=padding_idx)\n if embedding is not None:\n self.embedding.weight.data = embedding\n\n state_dict = torch.load(opt['MTLSTM_path'], map_location=lambda storage, loc: storage)\n self.rnn1 = nn.LSTM(300, 300, num_layers=1, bidirectional=True)\n self.rnn2 = nn.LSTM(600, 300, num_layers=1, bidirectional=True)\n\n state_dict1 = dict([(name, param.data) if isinstance(param, nn.Parameter) else (name, param)\n for name, param in state_dict.items() if '0' in name])\n state_dict2 = dict([(name.replace('1', '0'), param.data) if isinstance(param, nn.Parameter) else (name.replace('1', '0'), param)\n for name, param in state_dict.items() if '1' in name])\n self.rnn1.load_state_dict(state_dict1)\n self.rnn2.load_state_dict(state_dict2)\n\n for p in self.embedding.parameters():\n p.requires_grad = False\n for p in self.rnn1.parameters():\n p.requires_grad = False\n for p in self.rnn2.parameters():\n p.requires_grad = False\n\n self.output_size = 600\n\n def setup_eval_embed(self, eval_embed, padding_idx=0):\n \"\"\"Allow evaluation vocabulary size to be greater than training vocabulary size\n\n Arguments:\n eval_embed (Float Tensor): Initialize eval_embed to 
be the specified embedding vectors\n \"\"\"\n self.eval_embed = nn.Embedding(eval_embed.size(0), eval_embed.size(1), padding_idx = padding_idx)\n self.eval_embed.weight.data = eval_embed\n\n for p in self.eval_embed.parameters():\n p.requires_grad = False\n\n def forward(self, x_idx, x_mask):\n \"\"\"A pretrained MT-LSTM (McCann et. al. 2017).\n This LSTM was trained with 300d 840B GloVe on the WMT 2017 machine translation dataset.\n\n Arguments:\n x_idx (Long Tensor): a Long Tensor of size (batch * len).\n x_mask (Byte Tensor): a Byte Tensor of mask for the input tensor (batch * len).\n \"\"\"\n # emb = self.embedding if self.training else self.eval_embed\n emb = self.embedding\n x_hiddens = emb(x_idx)\n\n lengths = x_mask.data.eq(0).long().sum(1).squeeze()\n lens, indices = torch.sort(lengths, 0, True)\n\n output1, _ = self.rnn1(nn.utils.rnn.pack_padded_sequence(x_hiddens[indices], lens.tolist(), batch_first=True))\n output2, _ = self.rnn2(output1)\n\n output1 = nn.utils.rnn.pad_packed_sequence(output1, batch_first=True)[0]\n output2 = nn.utils.rnn.pad_packed_sequence(output2, batch_first=True)[0]\n\n _, _indices = torch.sort(indices, 0)\n output1 = output1[_indices]\n output2 = output2[_indices]\n\n return output1, output2\n\n\n# Attention layer\nclass FullAttention(nn.Module):\n def __init__(self, full_size, hidden_size, num_level, dropout=0., variational_dropout=True):\n super(FullAttention, self).__init__()\n assert(hidden_size % num_level == 0)\n self.full_size = full_size\n self.hidden_size = hidden_size\n self.attsize_per_lvl = hidden_size // num_level\n self.num_level = num_level\n self.linear = nn.Linear(full_size, hidden_size, bias=False)\n self.linear_final = nn.Parameter(torch.ones(1, hidden_size), requires_grad = True)\n self.output_size = hidden_size\n self.dropout = dropout\n self.variational_dropout = variational_dropout\n # print(\"Full Attention: (atten. 
{} -> {}, take {}) x {}\".format(self.full_size, self.attsize_per_lvl, hidden_size // num_level, self.num_level))\n\n def forward(self, x1_att, x2_att, x2, x2_mask):\n \"\"\"\n x1_att: batch * len1 * full_size\n x2_att: batch * len2 * full_size\n x2: batch * len2 * hidden_size\n x2_mask: batch * len2\n \"\"\"\n if x1_att.size(0) == 1 and x2_att.size(0) > 1:\n x1_att = x1_att.repeat(x2_att.size(0), 1, 1)\n elif x2_att.size(0) == 1 and x1_att.size(0) > 1:\n x2_att = x2_att.repeat(x1_att.size(0), 1, 1)\n x2 = x2.repeat(x1_att.size(0), 1, 1)\n x2_mask = x2_mask.repeat(x1_att.size(0), 1)\n\n batch_size = x1_att.size(0)\n len1 = x1_att.size(1)\n len2 = x2_att.size(1)\n\n x1_att = dropout(x1_att, p=self.dropout, training=self.training, variational=self.variational_dropout)\n x2_att = dropout(x2_att, p=self.dropout, training=self.training, variational=self.variational_dropout)\n\n x1_key = F.relu(self.linear(x1_att.view(-1, self.full_size)))\n x2_key = F.relu(self.linear(x2_att.view(-1, self.full_size)))\n final_v = self.linear_final.expand_as(x2_key)\n x2_key = final_v * x2_key\n\n x1_rep = x1_key.view(-1, len1, self.num_level, self.attsize_per_lvl).transpose(1, 2).contiguous().view(-1, len1, self.attsize_per_lvl)\n x2_rep = x2_key.view(-1, len2, self.num_level, self.attsize_per_lvl).transpose(1, 2).contiguous().view(-1, len2, self.attsize_per_lvl)\n\n scores = x1_rep.bmm(x2_rep.transpose(1, 2)).view(-1, self.num_level, len1, len2) # batch * num_level * len1 * len2\n\n x2_mask = x2_mask.unsqueeze(1).unsqueeze(2).expand_as(scores)\n scores.data.masked_fill_(x2_mask.data, -float('inf'))\n\n alpha_flat = F.softmax(scores.view(-1, len2), dim=1)\n alpha = alpha_flat.view(-1, len1, len2)\n # alpha = F.softmax(scores, dim=2)\n\n size_per_level = self.hidden_size // self.num_level\n atten_seq = alpha.bmm(x2.contiguous().view(-1, x2.size(1), self.num_level, size_per_level).transpose(1, 2).contiguous().view(-1, x2.size(1), size_per_level))\n\n return atten_seq.view(-1, 
self.num_level, len1, size_per_level).transpose(1, 2).contiguous().view(-1, len1, self.hidden_size)\n \n def __repr__(self):\n return \"FullAttention: (atten. {} -> {}, take {}) x {}\".format(self.full_size, self.attsize_per_lvl, self.hidden_size // self.num_level, self.num_level)\n\n\n# For summarizing a set of vectors into a single vector\nclass LinearSelfAttn(nn.Module):\n \"\"\"Self attention over a sequence:\n * o_i = softmax(Wx_i) for x_i in X.\n \"\"\"\n def __init__(self, input_size):\n super(LinearSelfAttn, self).__init__()\n self.linear = nn.Linear(input_size, 1)\n\n def forward(self, x, x_mask):\n \"\"\"\n x = batch * len * hdim\n x_mask = batch * len\n \"\"\"\n x = dropout(x, p=my_dropout_p, training=self.training)\n\n x_flat = x.contiguous().view(-1, x.size(-1))\n scores = self.linear(x_flat).view(x.size(0), x.size(1))\n scores.data.masked_fill_(x_mask.data, -float('inf'))\n alpha = F.softmax(scores, dim=1)\n return alpha\n\n\nclass BiAttn(nn.Module):\n \"\"\" Bi-Directonal Attention from https://arxiv.org/abs/1611.01603 \"\"\"\n def __init__(self, input_size, q2c: bool=True, query_dots: bool=True):\n super(BiAttn, self).__init__()\n self.input_size = input_size\n self.q2c = q2c\n self.query_dots = query_dots\n self.w_x = nn.Parameter(torch.Tensor(input_size, 1))\n self.w_y = nn.Parameter(torch.Tensor(input_size, 1))\n self.w_dot = nn.Parameter(torch.Tensor(input_size, 1))\n self.bias = nn.Parameter(torch.Tensor(1))\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.kaiming_uniform(self.w_x.data)\n nn.init.kaiming_uniform(self.w_y.data)\n nn.init.kaiming_uniform(self.w_dot.data)\n self.bias.data.zero_()\n\n def forward(self, x, y, x_mask=None, y_mask=None, raw_score_only=False):\n \"\"\"\n Args:\n x: batch * len1 * hdim (context)\n y: batch * len2 * hdim (query)\n x_mask: batch * len1 (1 for padding, 0 for true)\n y_mask: batch * len2 (1 for padding, 0 for true)\n Output:\n if raw_score_only:\n scores: batch * len1 * len2\n else:\n 
matched_seq: batch * len1 * hdim\n\n \"\"\"\n batch_size = x.size(0)\n len1 = x.size(1)\n len2 = y.size(1)\n\n # get the scores\n x_ext = x.unsqueeze(2)\n y_ext = y.unsqueeze(1)\n try:\n xy = x_ext * y_ext\n except:\n print('x_ext:', x_ext.size())\n print('y_ext:', y_ext.size())\n import time\n time.sleep(10)\n \n scores = self.bias.view(1, 1, 1) + \\\n x.contiguous().view(-1, self.input_size).mm(self.w_x).view(batch_size, len1, 1) + \\\n y.contiguous().view(-1, self.input_size).mm(self.w_y).view(batch_size, 1, len2) + \\\n xy.view(-1, self.input_size).mm(self.w_dot).view(batch_size, len1, len2)\n\n\n # fill the padding part with -inf\n if x_mask is not None:\n scores = maskneginf(scores, x_mask.unsqueeze(2))\n if y_mask is not None:\n scores = maskneginf(scores, y_mask.unsqueeze(1))\n\n if raw_score_only:\n return scores\n\n\n alpha = F.softmax(scores, dim=2)\n alpha.data[alpha.data != alpha.data] = 0.\n\n c2q_attn = alpha.bmm(y)\n if x_mask is not None:\n c2q_attn = maskzero(c2q_attn, x_mask.unsqueeze(2))\n outputs = [c2q_attn]\n\n # query-to-context\n if self.q2c:\n beta = F.softmax(scores.max(2)[0], dim=1)\n q2c_attn = beta.unsqueeze(1).bmm(x)\n outputs.append(q2c_attn)\n\n if self.query_dots:\n outputs.append(x * c2q_attn)\n\n return outputs\n\n def __repr__(self):\n return '{}(input_size={}, q2c={}, query_dots={})'.format(\n self.__class__.__name__, self.input_size, self.q2c, self.query_dots)\n\n\nclass Linear(nn.Module):\n ''' Simple Linear layer with xavier init '''\n def __init__(self, d_in, d_out, bias=True):\n super(Linear, self).__init__()\n self.linear = nn.Linear(d_in, d_out, bias=bias)\n init.xavier_normal(self.linear.weight)\n\n def forward(self, x):\n return self.linear(x)\n\n\nclass ScaledDotProductAttention(nn.Module):\n ''' Scaled Dot-Product Attention '''\n\n def __init__(self, d_model, attn_dropout=0.0):\n super(ScaledDotProductAttention, self).__init__()\n self.temper = d_model ** 0.5\n self.dropout = nn.Dropout(attn_dropout)\n self.softmax = 
BottleSoftmax(dim=-1)\n\n def forward(self, q, k, v, attn_mask=None):\n attn = torch.bmm(q, k.transpose(1, 2)) / self.temper\n\n if attn_mask is not None:\n attn_mask = attn_mask.unsqueeze(1).expand_as(attn)\n attn.data.masked_fill_(attn_mask.data, -float('inf'))\n\n attn = F.softmax(attn, dim=-1)\n attn.data[attn.data != attn.data] = 0.\n attn = self.dropout(attn)\n output = torch.bmm(attn, v)\n\n return output, attn\n\n\nclass Highway(nn.Module):\n def __init__(self, input_size, hidden_size=128, num_layers=2, dropout_rate=0.):\n super(Highway, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.reduction = (input_size != hidden_size)\n self.dropout_rate = dropout_rate\n\n if self.input_size != self.hidden_size:\n self.reduction = nn.Linear(input_size, hidden_size)\n\n self.highway_layers = nn.ModuleList()\n for i in range(num_layers):\n self.highway_layers.append(nn.Linear(hidden_size, hidden_size*2))\n\n def forward(self, x, x_mask=None):\n ndim = x.dim()\n if ndim == 3:\n batch_size = x.size(0)\n x_len = x.size(1)\n x = x.view(-1, x.size(2))\n\n if self.input_size != self.hidden_size:\n x = self.reduction(x)\n\n for layer in self.highway_layers:\n x_trans = layer(F.dropout(x, self.dropout_rate, training=self.training))\n gate = F.sigmoid(x_trans[:, self.hidden_size:])\n x_trans = F.relu(x_trans[:, :self.hidden_size])\n x = x * (1 - gate) + x_trans * gate\n\n if ndim == 3:\n x = x.view(batch_size, x_len, -1)\n\n return x\n\n\nclass Bottle(nn.Module):\n ''' Perform the reshape routine before and after an operation '''\n\n def forward(self, input):\n if len(input.size()) <= 2:\n return super(Bottle, self).forward(input)\n size = input.size()[:2]\n out = super().forward(input.view(size[0]*size[1], -1))\n return out.view(size[0], size[1], -1)\n\n\nclass BottleLinear(Bottle, Linear):\n ''' Perform the reshape routine before and after a linear projection '''\n pass\n\n\nclass BottleSoftmax(Bottle, 
nn.Softmax):\n ''' Perform the reshape routine before and after a softmax operation'''\n pass\n\n\nclass MultiHeadAttention(nn.Module):\n ''' Multi-Head Attention module '''\n\n def __init__(self, n_head, d_model, d_k, d_v, attn_dropout=0.0, input_layer_norm=False):\n super(MultiHeadAttention, self).__init__()\n\n if input_layer_norm:\n self.layer_norm = LayerNorm(d_model)\n\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n self.w_qs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))\n self.w_ks = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))\n self.w_vs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_v))\n\n self.attention = ScaledDotProductAttention(d_k, attn_dropout=attn_dropout)\n self.proj = BottleLinear(n_head*d_v, d_model)\n\n init.xavier_normal(self.w_qs)\n init.xavier_normal(self.w_ks)\n init.xavier_normal(self.w_vs)\n\n def forward(self, q, attn_mask=None):\n '''only supports self-attn'''\n if hasattr(self, 'layer_norm'):\n q = self.layer_norm(q)\n\n d_k, d_v = self.d_k, self.d_v\n n_head = self.n_head\n\n\n mb_size, len_q, d_model = q.size()\n len_k = len_q\n len_v = len_q\n\n # treat as a (n_head) size batch\n q_s = q.repeat(n_head, 1, 1).view(n_head, -1, d_model) # n_head x (mb_size*len_q) x d_model\n k_s = q_s\n v_s = q_s\n\n # treat the result as a (n_head * mb_size) size batch\n q_s = torch.bmm(q_s, self.w_qs).view(-1, len_q, d_k) # (n_head*mb_size) x len_q x d_k\n k_s = torch.bmm(k_s, self.w_ks).view(-1, len_k, d_k) # (n_head*mb_size) x len_k x d_k\n v_s = torch.bmm(v_s, self.w_vs).view(-1, len_v, d_v) # (n_head*mb_size) x len_v x d_v\n\n # perform attention, result size = (n_head * mb_size) x len_q x d_v\n if attn_mask is not None:\n attn_mask = attn_mask.repeat(n_head, 1)\n outputs, attns = self.attention(q_s, k_s, v_s, attn_mask=attn_mask)\n\n # back to original mb_size batch, result size = mb_size x len_q x (n_head*d_v)\n outputs = torch.cat(torch.split(outputs, mb_size, dim=0), dim=-1) \n\n # project back to 
residual size\n if hasattr(self, 'proj'):\n outputs = self.proj(outputs)\n # outputs = self.dropout(outputs)\n\n return outputs \n\n\nclass GBEncoderBlock(nn.Module):\n '''Encoder of the Google Brain paper (QANet or AdamsNet)'''\n # TODO: dropout, layer dropout\n def __init__(self, hidden_size=128, kernel_size=7, num_layers=4, dropout_rate=0., variational_dropout=True, depth_drop=0., depth_drop_start=0, depth_drop_end=None, add_pos=True):\n '''assuming input_size == hidden_size'''\n super(GBEncoderBlock, self).__init__()\n self.hidden_size = hidden_size\n self.kernel_size = kernel_size\n self.num_layers = num_layers\n self.dropout_rate = dropout_rate\n self.variational_dropout = variational_dropout\n self.depth_drop = depth_drop\n self.depth_drop_start = depth_drop_start\n self.depth_drop_end = num_layers if depth_drop_end is None else depth_drop_end\n\n self.cnns = nn.ModuleList()\n for i in range(num_layers):\n # no activation?\n self.cnns.append(nn.Sequential(\n LayerNormChannelFirst(hidden_size),\n nn.Conv1d(hidden_size, hidden_size, kernel_size, padding=kernel_size//2, groups=hidden_size),\n nn.Conv1d(hidden_size, hidden_size, 1),\n nn.ReLU(True)\n )) \n self.self_attn = MultiHeadAttention(8, hidden_size, hidden_size//8, hidden_size//8, input_layer_norm=True)\n\n self.ffn = nn.Sequential(\n LayerNorm(hidden_size),\n BottleLinear(hidden_size, hidden_size*4),\n nn.ReLU(True),\n BottleLinear(hidden_size*4, hidden_size),\n )\n # add position embeding to the first block\n if add_pos:\n self.set_pos_emb(2000)\n\n def set_pos_emb(self, l):\n self.pos_emb = nn.Parameter(get_position_encoding(self.hidden_size, [l]).unsqueeze_(0))\n self.pos_emb.requires_grad = True\n \n\n def forward(self, x, x_mask=None):\n \"\"\"\n TODO add x_mask\n x = batch * len * hidden_size\n \"\"\"\n batch_size = x.size(0)\n x_len = x.size(1)\n\n drop_i = self.depth_drop_start\n\n if hasattr(self, 'pos_emb'):\n if x_len > self.pos_emb.size(1):\n self.set_pos_emb(x_len + 200)\n x = x + 
self.pos_emb[:, :x_len, :]\n # if x_mask is not None:\n # print('u1:', x.data[0].sum(1))\n # maskzero(x, x_mask.unsqueeze(2))\n # print('u2:', x.data[0].sum(1))\n\n\n x = x.transpose(1, 2)\n for cnn in self.cnns:\n drop_i += 1\n depth_drop_prob = self.depth_drop * drop_i / self.depth_drop_end\n if self.depth_drop <= 0. or torch.rand(1)[0] > depth_drop_prob:\n x_drop = dropout(x.transpose(1,2), p=self.dropout_rate, training=self.training,\n variational=self.variational_dropout).transpose(1, 2)\n residual = cnn(x_drop)\n if self.training and self.depth_drop > 0.:\n residual = residual / (1 - depth_drop_prob)\n x = x + residual\n # if x_mask is not None:\n # maskzero(x, x_mask.unsqueeze(1))\n x = x.transpose(1, 2)\n\n # print('t1:', x.data.sum())\n drop_i += 1\n depth_drop_prob = self.depth_drop * drop_i / self.depth_drop_end\n if self.depth_drop <= 0. or torch.rand(1)[0] > depth_drop_prob:\n x_drop = dropout(x, p=self.dropout_rate, training=self.training,\n variational=self.variational_dropout)\n residual = self.self_attn(x_drop, x_mask)\n if self.training and self.depth_drop > 0.:\n residual = residual / (1 - depth_drop_prob)\n x = x + residual\n # print('t2:', x.data.sum())\n\n drop_i += 1\n depth_drop_prob = self.depth_drop * drop_i / self.depth_drop_end\n if self.depth_drop <= 0. or torch.rand(1)[0] > depth_drop_prob:\n x_drop = dropout(x, p=self.dropout_rate, training=self.training,\n variational=self.variational_dropout)\n residual = self.ffn(x_drop)\n if self.training and self.depth_drop > 0.:\n residual = residual / (1 - depth_drop_prob)\n x = x + residual\n # if x_mask is not None:\n # maskzero(x, x_mask.unsqueeze(2))\n return x\n\n\ndef get_position_encoding(emb_size, lengths, min_timescale=1.0, max_timescale=1.0e4): \n '''\n create position embeding of size len1 (x len2 x len3 ...) 
x emb_size\n reference: https://github.com/tensorflow/tensor2tensor/blob/8bdecbe434d93cb1e79c0489df20fee2d5a37dc2/tensor2tensor/layers/common_attention.py#L503\n '''\n num_dims = len(lengths)\n num_timescales = emb_size // (num_dims * 2) \n log_timescale_increment = (math.log(float(max_timescale) / float(min_timescale)) / (num_timescales - 1))\n inv_timescales = min_timescale * (torch.arange(num_timescales) * -log_timescale_increment).exp()\n inv_timescales.unsqueeze_(0)\n x = None\n for dim, length in enumerate(lengths):\n position = torch.arange(length).unsqueeze_(1)\n scaled_time = position * inv_timescales\n signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)\n for _ in range(dim):\n signal.unsqueeze_(0)\n for _ in range(num_dims - 1 - dim):\n signal.unsqueeze_(-2)\n x = signal if x is None else x + signal\n return x\n\n\n\n# ------------------------------------------------------------------------------\n# Functional\n# ------------------------------------------------------------------------------\n\n\ndef uniform_weights(x, x_mask):\n \"\"\"Return uniform weights over non-masked input.\"\"\"\n alpha = Variable(torch.ones(x.size(0), x.size(1)))\n if x.data.is_cuda:\n alpha = alpha.cuda()\n alpha = alpha * x_mask.eq(0).float()\n alpha = alpha / alpha.sum(1).expand(alpha.size())\n return alpha\n\n\ndef weighted_avg(x, weights):\n \"\"\"x = batch * len * d\n weights = batch * len\n \"\"\"\n return weights.unsqueeze(1).bmm(x).squeeze(1)\n\n\nclass MaskNegInf(InplaceFunction):\n @staticmethod\n def forward(ctx, input, mask=None):\n ctx.save_for_backward(mask)\n if mask is not None:\n input.masked_fill_(mask.expand_as(input), -float('inf'))\n return input\n\n @staticmethod\n def backward(ctx, grad_output):\n mask = ctx.saved_variables[0]\n if mask is not None:\n grad_output.masked_fill_(mask.expand_as(grad_output), 0)\n return grad_output, None\n\n\nclass MaskZero(InplaceFunction):\n @staticmethod\n def forward(ctx, input, mask=None):\n 
ctx.save_for_backward(mask)\n if mask is not None:\n input.masked_fill_(mask.expand_as(input), 0)\n return input\n\n @staticmethod\n def backward(ctx, grad_output):\n print('go:', grad_output.sum())\n mask = ctx.saved_variables[0]\n if mask is not None:\n grad_output.masked_fill_(mask.expand_as(grad_output), 0)\n return grad_output, None\n\n\ndef maskneginf(input, mask):\n return MaskNegInf.apply(input, mask)\n\n\ndef maskzero(input, mask):\n return MaskZero.apply(input, mask)\n\n\ndef split_sentences(x, sentence_lens):\n assert x.size(0) == len(sentence_lens)\n ndim = x.dim()\n if ndim == 2:\n x = x.unsqueeze(-1)\n\n\n x = x.transpose(1, 2)\n sentences = []\n max_sentence_len = max(l for s in sentence_lens for l in s)\n for i, lens in enumerate(sentence_lens):\n pos = 0\n for l in lens:\n sentences.append(F.pad(x[i, :, pos:pos+l], (0, max_sentence_len - l)).transpose(0, 1))\n pos += l\n\n if ndim == 2:\n return torch.stack(sentences, 0).squeeze_(-1)\n else:\n return torch.stack(sentences, 0)\n\n\ndef combine_sentences(x, sentence_lens):\n ndim = x.dim()\n if ndim == 2:\n x = x.unsqueeze(-1)\n docs = []\n max_doc_len = max(sum(s) for s in sentence_lens)\n sent_id = 0\n zeros = Variable(x.data.new(max_doc_len, x.size(2)).zero_(), requires_grad=False)\n for i, lens in enumerate(sentence_lens):\n doc = []\n doc_len = sum(lens)\n for l in lens:\n doc.append(x[sent_id, :l])\n sent_id += 1\n if doc_len < max_doc_len:\n doc.append(zeros[:max_doc_len-doc_len])\n doc = torch.cat(doc, 0) \n docs.append(doc)\n\n if ndim == 2:\n return torch.stack(docs, 0).squeeze(-1)\n else:\n return torch.stack(docs, 0)\n\n\ndef duplicate_for_sentences(x, sentence_lens):\n if not isinstance(x, Variable):\n x = Variable(x)\n assert x.size(0) == len(sentence_lens)\n ndim = x.dim()\n if ndim == 2:\n x = x.unsqueeze(-1)\n duplicated = []\n for i, lens in enumerate(sentence_lens):\n duplicated.append(x[i:i+1].repeat(len(lens), 1, 1))\n\n if ndim == 2:\n return torch.cat(duplicated, 
0).squeeze_(-1)\n else:\n return torch.cat(duplicated, 0)\n\n\ndef reduce_for_sentences(x, sentence_lens):\n ndim = x.dim()\n if ndim == 2:\n x = x.unsqueeze(-1)\n reduced = []\n offset = 0\n for i, lens in enumerate(sentence_lens):\n reduced.append(x[offset])\n offset += len(lens)\n\n if ndim == 2:\n return torch.stack(reduced, 0).squeeze_(-1)\n else:\n return torch.stack(reduced, 0)\n\n\ndef replace_nan_grad_hook(grad):\n grad.data.masked_fill_(grad.data != grad.data, 0)\n return grad\n\n\ndef print_hook(name):\n def hook(grad):\n print('{}: {}/{}'.format(name, (grad.data != grad.data).sum(), grad.data.numel()))\n return hook\n\n\ndef logsumexp(x, dim=None, keepdim=False):\n if dim is None:\n x, dim = x.view(-1), 0\n xm, _ = torch.max(x, dim, keepdim=True)\n output = xm + torch.log(torch.sum(torch.exp(x - xm), dim, keepdim=True))\n return output if keepdim else output.squeeze(dim)\n\n\ndef my_where(cond, xt, xf):\n ret = torch.zeros_like(xt)\n ret[cond] = xt[cond]\n ret[cond ^ 1] = xf[cond ^ 1]\n return ret\n\n\n" ]
[ [ "torch.nn.functional.softmax", "torch.nn.GLU", "torch.nn.Dropout2d", "torch.max", "torch.load", "torch.cat", "torch.nn.functional.dropout", "torch.zeros", "torch.sin", "torch.nn.Embedding", "torch.nn.utils.rnn.pad_packed_sequence", "torch.FloatTensor", "torch.split", "torch.nn.init.xavier_normal", "torch.autograd.Variable", "torch.nn.Dropout", "torch.ones", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.Sigmoid", "torch.nn.functional.sigmoid", "torch.nn.functional.relu", "torch.sort", "torch.bmm", "torch.arange", "torch.rand", "torch.cos", "torch.nn.functional.pad", "torch.nn.ModuleList", "torch.zeros_like", "torch.exp", "torch.nn.Linear", "torch.nn.Conv1d", "torch.stack", "torch.nn.functional.log_softmax", "torch.nn.LSTM", "torch.Tensor", "torch.nn.ReLU", "torch.nn.init.kaiming_uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jfcarr/GFPGAN
[ "d37467fe8a4f7ead994a6dfe87f8de191b3010b4" ]
[ "gfpgan/utils.py" ]
[ "import cv2\nimport os\nimport torch\nfrom basicsr.utils import img2tensor, tensor2img\nfrom basicsr.utils.download_util import load_file_from_url\nfrom facexlib.utils.face_restoration_helper import FaceRestoreHelper\nfrom torchvision.transforms.functional import normalize\n\nfrom gfpgan.archs.gfpgan_bilinear_arch import GFPGANBilinear\nfrom gfpgan.archs.gfpganv1_arch import GFPGANv1\nfrom gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean\n\nROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nclass GFPGANer():\n \"\"\"Helper for restoration with GFPGAN.\n\n It will detect and crop faces, and then resize the faces to 512x512.\n GFPGAN is used to restored the resized faces.\n The background is upsampled with the bg_upsampler.\n Finally, the faces will be pasted back to the upsample background image.\n\n Args:\n model_path (str): The path to the GFPGAN model. It can be urls (will first download it automatically).\n upscale (float): The upscale of the final output. Default: 2.\n arch (str): The GFPGAN architecture. Option: clean | original. Default: clean.\n channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2.\n bg_upsampler (nn.Module): The upsampler for the background. 
Default: None.\n \"\"\"\n\n def __init__(self, model_path, upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=None):\n self.upscale = upscale\n self.bg_upsampler = bg_upsampler\n\n # initialize model\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # initialize the GFP-GAN\n if arch == 'clean':\n self.gfpgan = GFPGANv1Clean(\n out_size=512,\n num_style_feat=512,\n channel_multiplier=channel_multiplier,\n decoder_load_path=None,\n fix_decoder=False,\n num_mlp=8,\n input_is_latent=True,\n different_w=True,\n narrow=1,\n sft_half=True)\n elif arch == 'bilinear':\n self.gfpgan = GFPGANBilinear(\n out_size=512,\n num_style_feat=512,\n channel_multiplier=channel_multiplier,\n decoder_load_path=None,\n fix_decoder=False,\n num_mlp=8,\n input_is_latent=True,\n different_w=True,\n narrow=1,\n sft_half=True)\n elif arch == 'original':\n self.gfpgan = GFPGANv1(\n out_size=512,\n num_style_feat=512,\n channel_multiplier=channel_multiplier,\n decoder_load_path=None,\n fix_decoder=True,\n num_mlp=8,\n input_is_latent=True,\n different_w=True,\n narrow=1,\n sft_half=True)\n # initialize face helper\n self.face_helper = FaceRestoreHelper(\n upscale,\n face_size=512,\n crop_ratio=(1, 1),\n det_model='retinaface_resnet50',\n save_ext='png',\n device=self.device)\n\n if model_path.startswith('https://'):\n model_path = load_file_from_url(\n url=model_path, model_dir=os.path.join(ROOT_DIR, 'gfpgan/weights'), progress=True, file_name=None)\n loadnet = torch.load(model_path)\n if 'params_ema' in loadnet:\n keyname = 'params_ema'\n else:\n keyname = 'params'\n self.gfpgan.load_state_dict(loadnet[keyname], strict=True)\n self.gfpgan.eval()\n self.gfpgan = self.gfpgan.to(self.device)\n\n @torch.no_grad()\n def enhance(self, img, has_aligned=False, only_center_face=False, paste_back=True):\n self.face_helper.clean_all()\n\n if has_aligned: # the inputs are already aligned\n img = cv2.resize(img, (512, 512))\n self.face_helper.cropped_faces = [img]\n 
else:\n self.face_helper.read_image(img)\n # get face landmarks for each face\n self.face_helper.get_face_landmarks_5(only_center_face=only_center_face, eye_dist_threshold=5)\n # eye_dist_threshold=5: skip faces whose eye distance is smaller than 5 pixels\n # TODO: even with eye_dist_threshold, it will still introduce wrong detections and restorations.\n # align and warp each face\n self.face_helper.align_warp_face()\n\n # face restoration\n for cropped_face in self.face_helper.cropped_faces:\n # prepare data\n cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)\n normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)\n cropped_face_t = cropped_face_t.unsqueeze(0).to(self.device)\n\n try:\n output = self.gfpgan(cropped_face_t, return_rgb=False)[0]\n # convert to image\n restored_face = tensor2img(output.squeeze(0), rgb2bgr=True, min_max=(-1, 1))\n except RuntimeError as error:\n print(f'\\tFailed inference for GFPGAN: {error}.')\n restored_face = cropped_face\n\n restored_face = restored_face.astype('uint8')\n self.face_helper.add_restored_face(restored_face)\n\n if not has_aligned and paste_back:\n # upsample the background\n if self.bg_upsampler is not None:\n # Now only support RealESRGAN for upsampling background\n bg_img = self.bg_upsampler.enhance(img, outscale=self.upscale)[0]\n else:\n bg_img = None\n\n self.face_helper.get_inverse_affine(None)\n # paste each restored face to the input image\n restored_img = self.face_helper.paste_faces_to_input_image(upsample_img=bg_img)\n return self.face_helper.cropped_faces, self.face_helper.restored_faces, restored_img\n else:\n return self.face_helper.cropped_faces, self.face_helper.restored_faces, None\n" ]
[ [ "torch.no_grad", "torch.cuda.is_available", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xiaoyw71/Feature-engineering-machine-learning
[ "6fb78d29ef696e3aa2dd2eeceaa07a347358ba0f" ]
[ "src/DataBase/ComboDataAnalysis.py" ]
[ "# -*- coding: utf-8 -*-\r\n'''\r\nCreated on 2020年12月21日\r\n\r\n@author: 肖永威\r\n'''\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport json\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n#多维特征数据进行聚类分析\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.cluster import KMeans,MiniBatchKMeans\r\nfrom sklearn.cluster import Birch\r\nfrom sklearn.mixture import GaussianMixture\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\nfrom DataBase.Car_Info import Gas_Collection\r\nfrom matplotlib import cm\r\n\r\nclass ExploreDataAnalysis(object):\r\n def __init__(self,datas):\r\n '''\r\n Constructor\r\n ''' \r\n self.df = datas\r\n # 统一量纲为小时\r\n self.df['fuel_time'] = round(self.df['fuel_time']/60,2) \r\n # 进出站间隔时间 \r\n self.df['Plate_interva'] = self.df['Entry_interva'] + self.df['Dep_interva'] + self.df['fuel_time']\r\n self.df['Entry_pre'] = round(self.df['Entry_interva']/self.df['Plate_interva'],3)*100 \r\n self.df['Dep_pre'] = round(self.df['Dep_interva']/self.df['Plate_interva'],3)*100 \r\n \r\n def correlation_analysis(self,cols_name=[]):\r\n #names = ['price','fuelle','amount','Payment','vol','changes',\r\n # 'fuel_month','fuel_day','changes_month','changes_day','fuel_interva','time_before']\r\n fig = plt.figure() #调用figure创建一个绘图对象\r\n ax = fig.add_subplot(111)\r\n cols_num = len(cols_name)\r\n \r\n if cols_num >1:\r\n df = self.df[cols_name]\r\n else:\r\n df = self.df\r\n # 获取表默认列的数量\r\n cols_name = df.columns\r\n cols_num = len(df.columns)\r\n \r\n ax.set_xticklabels(cols_name) #生成x轴标签\r\n ax.set_yticklabels(cols_name) \r\n \r\n correlations = df.corr(method='pearson',min_periods=1) #计算变量之间的相关系数矩阵\r\n correlations.to_excel('dcorr1.xlsx')\r\n print(correlations)\r\n # plot correlation matrix\r\n\r\n cax = ax.matshow(correlations,cmap = 'inferno', vmin=-1, vmax=1) #绘制热力图,从-1到1\r\n 
fig.colorbar(cax) #将matshow生成热力图设置为颜色渐变条\r\n ticks = np.arange(0,cols_num,1) #生成0-9,步长为1\r\n ax.set_xticks(ticks) #生成刻度\r\n ax.set_yticks(ticks)\r\n\r\n plt.show()\r\n \r\n return correlations\r\n \r\n def Features_extra(self,drop_cols_name=[],times=1):\r\n df = self.df.drop(drop_cols_name,axis=1)\r\n\r\n # 取在同日、同一加油站相遇的次数\r\n # groupby中的as_index=False,对于聚合输出,返回以组标签作为索引的对象。仅与DataFrame输入相关。as_index = False实际上是“SQL风格”的分组输出。\r\n df_feature = df.groupby(['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag'], as_index=False)['fuel_interva'].count() \r\n #print(df_feature1)\r\n #df_feature = df_feature1[['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag','fuel_time']]\r\n df_feature = df_feature.rename(columns={'fuel_interva':'times'})\r\n # 取相遇次数>times的数据集\r\n df_feature = df_feature[df_feature['times']>times].reset_index(drop=True) \r\n # 依据相遇次数,筛选数据集\r\n df = pd.merge(df,df_feature[['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag']],how='inner',on=['IC_ID', 'License_plate', \r\n 'IC_ID_R', 'License_plate_R', 'Flag'],right_index=True)\r\n print(df)\r\n print(df_feature)\r\n # 取均值\r\n df_feature1 = df[['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag','fuel_time','IC_time','Shop_time','Entry_time','Dep_time', 'Entry_interva','Shop_interva', \r\n 'Dep_interva','combos','Plate_interva','Entry_pre','Dep_pre']].groupby(['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag'], as_index=False).mean()\r\n df_feature = pd.merge(left=df_feature, right=df_feature1,how=\"left\",on=['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag']) #左连接 \r\n print(df_feature.dtypes)\r\n # 取标准差\r\n df_feature1 = df[['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag','fuel_time','IC_time','Shop_time','Entry_time','Dep_time', 'Entry_interva', 'Shop_interva',\r\n 'Dep_interva','Plate_interva','Entry_pre','Dep_pre']].groupby(['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag'], as_index=False).std()\r\n \r\n 
print(df_feature1.dtypes)\r\n #修改为标准差列名\r\n df_feature1 = df_feature1.rename(columns={'fuel_time':'fuel_time_std','IC_time':'IC_time_std','Entry_time':'Entry_time_std','Dep_time':'Dep_time_std', \r\n 'Entry_interva':'Entry_interva_std', 'Dep_interva':'Dep_interva_std', 'Plate_interva':'Plate_interva_std','Shop_time':'Shop_time_std',\r\n 'Entry_pre':'Entry_pre_std','Dep_pre':'Dep_pre_std','Shop_interva':'Shop_interva_std'})\r\n print(df_feature1.dtypes)\r\n df_feature = pd.merge(left=df_feature, right=df_feature1,how=\"left\",on=['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag']) #左连接 \r\n print(df_feature1.dtypes)\r\n '''\r\n # 取最大值\r\n df_feature1 = df[['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag','Shop_interva','Entry_interva', \r\n 'Dep_interva']].groupby(['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag'], as_index=False).max()\r\n \r\n #修改为最大值列名\r\n df_feature1 = df_feature1.rename(columns={'Entry_interva':'Entry_interva_max', 'Dep_interva':'Dep_interva_max','Shop_interva':'Shop_interva_max'})\r\n df_feature = pd.merge(left=df_feature, right=df_feature1,how=\"left\",on=['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag']) #左连接 \r\n # 取最小值\r\n df_feature1 = df[['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag','Entry_interva', \r\n 'Dep_interva']].groupby(['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag'], as_index=False).min()\r\n \r\n #修改为最小值列名\r\n df_feature1 = df_feature1.rename(columns={'Entry_interva':'Entry_interva_min', 'Dep_interva':'Dep_interva_min'})\r\n df_feature = pd.merge(left=df_feature, right=df_feature1,how=\"left\",on=['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag']) #左连接 \r\n \r\n # 偏度(三阶)\r\n if times>=5:\r\n df_feature3 = df[['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag','fuel_time','IC_time','Entry_time','Dep_time', 'Entry_interva', \r\n 'Dep_interva','combos','Plate_interva','Entry_pre','Dep_pre' ]].groupby(['IC_ID', 
'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag'], as_index=False).skew()\r\n df_feature3 = df_feature3.rename(columns={'fuel_time':'fuel_time_skew','IC_time':'IC_time_skew','Entry_time':'Entry_time_skew',\r\n 'Dep_time':'Dep_time_skew', 'Entry_interva':'Entry_interva_skew', 'Dep_interva':'Dep_interva_skew','combos':'combos_skew',\r\n 'Plate_interva':'Plate_interva_skew','Entry_pre':'Entry_pre_skew','Dep_pre':'Dep_pre_skew'})\r\n df_feature = pd.merge(left=df_feature, right=df_feature3,how=\"left\",on=['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R', 'Flag']) #左连接 \r\n \r\n for index,row in df_feature.iterrows():\r\n IC_ID = row['IC_ID']\r\n License_plate = row['License_plate']\r\n IC_ID_R = row['IC_ID_R']\r\n License_plate_R = row['License_plate_R']\r\n \r\n df_tmp = df[(df['IC_ID']==IC_ID) & (df['License_plate']==License_plate) & (df['IC_ID_R']==IC_ID_R) & (df['License_plate_R']==License_plate_R)][['fuel_time','IC_time',\r\n 'Entry_time','Dep_time', 'Entry_interva', 'Dep_interva','combos','Plate_interva','Entry_pre','Dep_pre']]\r\n \r\n \r\n k4 = df_tmp.kurt()\r\n df_feature.loc[index:index,('fuel_time_kurt','IC_time_kurt', 'Entry_time_kurt','Dep_time_kurt', 'Entry_interva_kurt', 'Dep_interva_kurt', 'combos_kurt',\r\n 'Plate_interva_kurt','Entry_pre_kurt','Dep_pre_kurt')] = k4.tolist() \r\n '''\r\n print(df_feature)\r\n self.data_feature = df_feature\r\n\r\n df_feature = df_feature.drop(['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R'],axis=1)\r\n \r\n print(df_feature.columns)\r\n #df_feature = df_feature[df_feature['times']>1].reset_index(drop=True) \r\n print(df_feature)\r\n self.df_feature = df_feature\r\n \r\n return df_feature\r\n \r\n def get_Data_Feature(self):\r\n return self.data_feature\r\n # 获取结果集的列名\r\n def get_Feature_Columns(self):\r\n \r\n return self.df_feature.columns\r\n \r\n def set_DataAnalysis(self,datas):\r\n \r\n self.data_feature = datas\r\n self.df_feature = datas.drop(['IC_ID', 'License_plate', 'IC_ID_R', 
'License_plate_R'],axis=1)\r\n \r\n return\r\n \r\n def show_heatmap(self):\r\n fig = plt.figure() #调用figure创建一个绘图对象\r\n ax = fig.add_subplot(111)\r\n cols_name = self.df_feature.columns\r\n cols_num = len(self.df_feature.columns)\r\n correlations = self.df_feature.corr(method='pearson',min_periods=1) #计算变量之间的相关系数矩阵\r\n self.corr = correlations\r\n correlations.to_excel('dcorr1.xlsx')\r\n print(correlations)\r\n # plot correlation matrix\r\n\r\n cax = ax.matshow(correlations,cmap = 'inferno', vmin=-1, vmax=1) #绘制热力图,从-1到1\r\n fig.colorbar(cax) #将matshow生成热力图设置为颜色渐变条\r\n ticks = np.arange(0,cols_num,1) #生成0-9,步长为1\r\n ax.set_xticks(ticks) #生成刻度\r\n ax.set_yticks(ticks)\r\n ax.set_xticklabels(cols_name) #生成x轴标签\r\n ax.set_yticklabels(cols_name) \r\n plt.show()\r\n \r\n return correlations\r\n \r\n def important_feature(self,cols_name): \r\n df = self.df_feature[cols_name].fillna(0)\r\n print(df.dtypes)\r\n print(df[df.isnull().T.any()])\r\n \r\n y_df = df[['Flag']]\r\n X_df = df.drop('Flag',axis=1)\r\n #注意训练集、测试集返回参数顺序\r\n x_train,x_text, y_train, y_test = train_test_split(X_df,y_df,test_size=0.1)\r\n \r\n #y_test = df_test_y.values\r\n # n_estimators:森林中树的数量,随机森林中树的数量默认10个树,精度递增显著,但并不是越多越好,加上verbose=True,显示进程使用信息\r\n # n_jobs 整数 可选(默认=1) 适合和预测并行运行的作业数,如果为-1,则将作业数设置为核心数\r\n forest_model = RandomForestClassifier(n_estimators=10, random_state=0, n_jobs=-1)\r\n forest_model.fit(x_train, y_train)\r\n feat_labels = X_df.columns\r\n #feat_labels = col_names[1:]\r\n # 下面对训练好的随机森林,完成重要性评估\r\n # feature_importances_ 可以调取关于特征重要程度\r\n importances = forest_model.feature_importances_\r\n print(\"重要性:\", importances)\r\n x_columns = X_df.columns\r\n #x_columns = col_names[1:]\r\n indices = np.argsort(importances)[::-1]\r\n x_columns_indices = []\r\n for f in range(x_train.shape[1]):\r\n # 对于最后需要逆序排序,我认为是做了类似决策树回溯的取值,从叶子收敛\r\n # 到根,根部重要程度高于叶子。\r\n print(\"%2d) %-*s %f\" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]]))\r\n 
x_columns_indices.append(feat_labels[indices[f]])\r\n \r\n print(x_columns_indices)\r\n print(len(x_columns))\r\n print(x_columns)\r\n #print(np.arange(x_columns.shape[0]))\r\n \r\n # 筛选变量(选择重要性比较高的变量)\r\n #threshold = 0.05\r\n #x_selected = x_train[:, importances > threshold]\r\n \r\n plt.figure(figsize=(6, 6))\r\n plt.rcParams['font.sans-serif'] = [\"SimHei\"]\r\n plt.rcParams['axes.unicode_minus'] = False \r\n plt.title(\"IC卡加油与号牌识别集中各个特征的重要程度\", fontsize=16)\r\n plt.ylabel(\"import level\", fontsize=12, rotation=90)\r\n \r\n num = len(x_columns)\r\n for i in range(num):\r\n plt.bar(i, importances[indices[i]], color='blue', align='center')\r\n plt.xticks(np.arange(num), x_columns_indices, rotation=90, fontsize=12)\r\n plt.tight_layout()\r\n \r\n plt.show() \r\n \r\n return importances\r\n \r\n def cluster_analysis(self):\r\n df = self.df_feature.fillna(0)\r\n \r\n Flag=list(df['Flag'])\r\n #print(grain_variety)\r\n df=df.drop('Flag',axis=1)\r\n print(df.columns)\r\n print(df)\r\n \r\n # 升纬没有效果\r\n #poly = PolynomialFeatures(degree=2,include_bias = False)\r\n #X = df.values\r\n #samples = poly.fit_transform(X)\r\n # df = df[['times']]\r\n samples=df.values\r\n #samples=df_poly.values\r\n print(samples)\r\n \r\n #标准化\r\n scaler=StandardScaler()\r\n \r\n kmeans=KMeans(n_clusters=2,random_state=9,precompute_distances='auto',max_iter=1000)\r\n pipeline=make_pipeline(scaler,kmeans)\r\n pipeline.fit(samples) #训练模型\r\n labels=pipeline.predict(samples)#预测\r\n \r\n df_cluster=pd.DataFrame({'labels':labels,'Flag':Flag})\r\n ct=pd.crosstab(df_cluster['labels'],df_cluster['Flag'])\r\n print('K-Means')\r\n print(ct)\r\n\r\n #标准化\r\n scaler=StandardScaler() \r\n Minikmeans=MiniBatchKMeans(n_clusters=2,random_state=9,max_iter=1000)\r\n print('MiniBatchKMeans')\r\n pipeline=make_pipeline(scaler,Minikmeans)\r\n pipeline.fit(samples) #训练模型\r\n labels=pipeline.predict(samples)#预测 \r\n\r\n df_cluster=pd.DataFrame({'labels':labels,'Flag':Flag})\r\n 
ct=pd.crosstab(df_cluster['labels'],df_cluster['Flag'])\r\n print(ct)\r\n \r\n print('Birch') \r\n birch = Birch(n_clusters = 2,threshold = 0.6)\r\n est = birch.fit(samples)\r\n labels = est.labels_\r\n df_cluster=pd.DataFrame({'labels':labels,'Flag':Flag})\r\n ct=pd.crosstab(df_cluster['labels'],df_cluster['Flag'])\r\n print(ct)\r\n \r\n print('GaussianMixture') \r\n gmm = GaussianMixture(n_components=2)\r\n gmm.fit(samples)\r\n labels = gmm.predict(samples) \r\n \r\n df_cluster=pd.DataFrame({'labels':labels,'Flag':Flag})\r\n ct=pd.crosstab(df_cluster['labels'],df_cluster['Flag'])\r\n print(ct)\r\n\r\n return\r\n \r\n def dimension_upgrading(self,df,f1):\r\n \r\n return df\r\n \r\n def draw_corr_bar(self,key_name): \r\n values = self.corr.loc[key_name].values.tolist()\r\n print(values)\r\n draw_bar(self.df_feature.columns,values)\r\n \r\n return\r\n \r\n def draw_Hist_KDE(self):\r\n name = self.df_feature.columns\r\n k= 0\r\n fig1, ax1 = plt.subplots(nrows=6, ncols=4)\r\n for i in range(6): \r\n for j in range(4): \r\n ax1[i, j].hist(self.df_feature[name[k]],density=True)\r\n\r\n ax1[i, j].set_ylabel(name[k])\r\n k = k + 1\r\n \r\n plt.show() \r\n \r\ndef draw_bar(key_name,key_values):\r\n plt.rcParams['font.sans-serif']=['SimHei'] #显示中文标签\r\n plt.rcParams['axes.unicode_minus']=False\r\n # 标准柱状图的值\r\n def autolable(rects):\r\n for rect in rects:\r\n height = rect.get_height()\r\n if height>=0:\r\n plt.text(rect.get_x()+rect.get_width()/2.0 - 0.3,height+0.02,'%.3f'%height)\r\n else:\r\n plt.text(rect.get_x()+rect.get_width()/2.0 - 0.3,height-0.06,'%.3f'%height)\r\n # 如果存在小于0的数值,则画0刻度横向直线\r\n plt.axhline(y=0,color='black')\r\n #归一化\r\n norm = plt.Normalize(-1,1)\r\n norm_values = norm(key_values)\r\n map_vir = cm.get_cmap(name='inferno')\r\n colors = map_vir(norm_values)\r\n fig = plt.figure() #调用figure创建一个绘图对象\r\n plt.subplot(111)\r\n ax = plt.bar(key_name,key_values,width=0.5,color=colors,edgecolor='black') # edgecolor边框颜色\r\n \r\n sm = 
cm.ScalarMappable(cmap=map_vir,norm=norm) # norm设置最大最小值\r\n sm.set_array([])\r\n plt.colorbar(sm)\r\n autolable(ax)\r\n \r\n plt.show()\r\n\r\ndef generate_combo_data(times = 0):\r\n GC = Gas_Collection('study')\r\n \r\n df = GC.get_combo_data()\r\n \r\n # 'combos' 进出站时间内,发生IC加油的次数\r\n drop_cols_name = ['fuelle_date', 'Entry_date', 'Departure_date']\r\n print(df.columns) \r\n print(df.dtypes) \r\n # 探索性数据分析\r\n EDA = ExploreDataAnalysis(df)\r\n \r\n #EDA.correlation_analysis(cols_name)\r\n df_feature = EDA.Features_extra(drop_cols_name,times)\r\n print(df_feature.dtypes)\r\n \r\n GC.generate_Analysis_data('Analysis_data1', EDA.get_Data_Feature())\r\n \r\n # 展现热力图\r\n EDA.show_heatmap()\r\n \r\n EDA.draw_corr_bar('Flag')\r\n \r\n cols_name = ['Flag', 'times','fuel_time','IC_time','Entry_time','Dep_time', 'Entry_interva', 'Dep_interva','combos',\r\n 'fuel_time_std','IC_time_std','Entry_time_std','Dep_time_std', 'Entry_interva_std', 'Dep_interva_std','combos_std',\r\n 'fuel_time_kurt','IC_time_kurt', 'Entry_time_kurt','Dep_time_kurt', 'Entry_interva_kurt', 'Dep_interva_kurt','combos_kurt'] \r\n\r\n cols_name = ['Flag', 'times','fuel_time','IC_time','Entry_time','Dep_time', 'Entry_interva', 'Dep_interva','combos',\r\n 'fuel_time_std','IC_time_std','Entry_time_std','Dep_time_std', 'Entry_interva_std', 'Dep_interva_std',\r\n 'Plate_interva','Entry_pre','Dep_pre','Plate_interva_std','Entry_pre_std','Dep_pre_std','Shop_time',\r\n 'Shop_interva','Shop_interva_std']\r\n #'Entry_interva_max', 'Dep_interva_max','Entry_interva_min', 'Dep_interva_min'] #,'Shop_interva'] #,'Shop_interva_std','Shop_interva_max'] \r\n\r\n cols_name = EDA.get_Feature_Columns()\r\n\r\n EDA.important_feature(cols_name)\r\n EDA.cluster_analysis() \r\n \r\n return\r\n\r\ndef ExploreDataAnalysis_combo_data(dbname = 'Analysis_data'): \r\n GC = Gas_Collection('study')\r\n \r\n df = GC.get_combo_data()\r\n \r\n # 'combos' 进出站时间内,发生IC加油的次数\r\n drop_cols_name = ['fuelle_date', 'Entry_date', 
'Departure_date']\r\n print(df.columns) \r\n # 探索性数据分析\r\n EDA = ExploreDataAnalysis(df)\r\n \r\n cols_name = ['IC_ID', 'License_plate', 'IC_ID_R', 'License_plate_R',\r\n 'Flag', 'times', 'fuel_time', 'IC_time', 'Shop_time', 'Entry_time',\r\n 'Dep_time', 'Entry_interva', 'Shop_interva', 'Dep_interva', 'combos',\r\n 'Plate_interva', 'Entry_pre', 'Dep_pre', 'fuel_time_std', 'IC_time_std',\r\n 'Shop_time_std', 'Entry_time_std', 'Dep_time_std', 'Entry_interva_std',\r\n 'Shop_interva_std', 'Dep_interva_std', 'Plate_interva_std',\r\n 'Entry_pre_std', 'Dep_pre_std']\r\n datas = GC.get_Analysis_data(dbname, cols_name)\r\n EDA.set_DataAnalysis(datas)\r\n \r\n # 展现热力图\r\n EDA.show_heatmap()\r\n \r\n EDA.draw_corr_bar('Flag')\r\n \r\n cols_name = ['Flag', 'times','fuel_time','IC_time','Entry_time','Dep_time', 'Entry_interva', 'Dep_interva','combos',\r\n 'fuel_time_std','IC_time_std','Entry_time_std','Dep_time_std', 'Entry_interva_std', 'Dep_interva_std','combos_std',\r\n 'fuel_time_kurt','IC_time_kurt', 'Entry_time_kurt','Dep_time_kurt', 'Entry_interva_kurt', 'Dep_interva_kurt','combos_kurt'] \r\n\r\n cols_name = ['Flag', 'times','fuel_time','IC_time','Entry_time','Dep_time', 'Entry_interva', 'Dep_interva','combos',\r\n 'fuel_time_std','IC_time_std','Entry_time_std','Dep_time_std', 'Entry_interva_std', 'Dep_interva_std',\r\n 'Plate_interva','Entry_pre','Dep_pre','Plate_interva_std','Entry_pre_std','Dep_pre_std','Shop_time',\r\n 'Shop_interva','Shop_interva_std']\r\n #'Entry_interva_max', 'Dep_interva_max','Entry_interva_min', 'Dep_interva_min'] #,'Shop_interva'] #,'Shop_interva_std','Shop_interva_max'] \r\n\r\n cols_name = EDA.get_Feature_Columns()\r\n\r\n #EDA.important_feature(cols_name)\r\n EDA.draw_Hist_KDE()\r\n EDA.cluster_analysis()\r\n \r\n return\r\n\r\n \r\nif __name__ == '__main__':\r\n # 1 生成数据并分析\r\n #generate_combo_data(times = 0)\r\n # 2 分析数据\r\n ExploreDataAnalysis_combo_data('Analysis_data1')\r\n \r\n pass\r\n" ]
[ [ "pandas.merge", "sklearn.cluster.KMeans", "sklearn.cluster.MiniBatchKMeans", "pandas.DataFrame", "pandas.crosstab", "matplotlib.pyplot.tight_layout", "sklearn.ensemble.RandomForestClassifier", "numpy.arange", "matplotlib.pyplot.subplot", "matplotlib.cm.ScalarMappable", "sklearn.cluster.Birch", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.Normalize", "sklearn.mixture.GaussianMixture", "numpy.argsort", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.axhline", "sklearn.pipeline.make_pipeline", "matplotlib.pyplot.subplots", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.bar", "matplotlib.cm.get_cmap", "sklearn.preprocessing.StandardScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
tiancaishaonvjituizi/Paddle
[ "5d08a4471973e1c2b2a595781d0a0840875a0c77" ]
[ "python/paddle/fluid/tests/unittests/ipu/test_optimizer_ipu.py" ]
[ "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport unittest\nimport paddle\nimport paddle.static\nfrom paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest\n\n\[email protected](not paddle.is_compiled_with_ipu(),\n \"core is not compiled with IPU\")\nclass TestBase(IPUOpTest):\n def setUp(self):\n self.set_atol()\n self.set_data_feed()\n self.set_feed_attr()\n self.set_attrs()\n\n def set_atol(self):\n self.atol = 1e-6\n\n def set_data_feed(self):\n self.feed = {\n \"image\": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),\n }\n\n def set_feed_attr(self):\n self.feed_shape = [x.shape for x in self.feed.values()]\n self.feed_list = list(self.feed.keys())\n self.feed_dtype = [x.dtype for x in self.feed.values()]\n\n def set_attrs(self):\n self.attrs = {\n \"optimizer\": 'sgd',\n \"weight_decay\": 0.0,\n \"loss_scaling\": 1.0,\n }\n\n def _test_optimizer(self, run_ipu=True):\n scope = paddle.static.Scope()\n main_prog = paddle.static.Program()\n startup_prog = paddle.static.Program()\n main_prog.random_seed = self.SEED\n startup_prog.random_seed = self.SEED\n np.random.seed(self.SEED)\n\n with paddle.static.scope_guard(scope):\n with paddle.static.program_guard(main_prog, startup_prog):\n image = paddle.static.data(\n name='image', shape=[1, 3, 10, 10], dtype='float32')\n conv1 = paddle.static.nn.conv2d(\n image, 
num_filters=3, filter_size=3, bias_attr=False)\n loss = paddle.mean(conv1)\n\n weight_decay = self.attrs['weight_decay']\n opt = paddle.optimizer.SGD(learning_rate=1e-1,\n weight_decay=weight_decay)\n if self.attrs['optimizer'] == 'adam':\n opt = paddle.optimizer.Adam(\n learning_rate=1e-1, weight_decay=weight_decay)\n elif self.attrs['optimizer'] == 'lamb':\n\n opt = paddle.optimizer.Lamb(\n learning_rate=1e-1, lamb_weight_decay=weight_decay)\n opt.minimize(loss)\n\n if run_ipu:\n place = paddle.IPUPlace()\n else:\n place = paddle.CPUPlace()\n exe = paddle.static.Executor(place)\n exe.run(startup_prog)\n\n if run_ipu:\n feed_list = [image.name]\n fetch_list = [loss.name]\n ipu_strategy = paddle.static.IpuStrategy()\n ipu_strategy.set_graph_config(is_training=True)\n ipu_strategy.loss_scaling = self.attrs[\"loss_scaling\"]\n program = paddle.static.IpuCompiledProgram(\n main_prog, ipu_strategy=ipu_strategy).compile(feed_list,\n fetch_list)\n else:\n program = main_prog\n\n result = []\n for epoch in range(100):\n loss_res = exe.run(program, feed=self.feed, fetch_list=[loss])\n result.append(loss_res)\n\n return np.array(result)\n\n def test(self):\n # cpu and ipu dimenstion mismatch, cpu:(100, 1, 1), ipu:(100, 1)\n ipu_loss = self._test_optimizer(True).flatten()\n cpu_loss = self._test_optimizer(False).flatten()\n\n self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=self.atol))\n\n\[email protected]('do not support L2 regularization')\nclass TestSGD(TestBase):\n def set_attrs(self):\n self.attrs = {\n \"optimizer\": 'sgd',\n \"weight_decay\": 0.1,\n \"loss_scaling\": 2.0,\n }\n\n\[email protected]('do not support L2 regularization')\nclass TestAdamCase1(TestBase):\n def set_attrs(self):\n self.attrs = {\n \"optimizer\": 'adam',\n \"weight_decay\": 0.1,\n \"loss_scaling\": 3.0,\n }\n\n\nclass TestAdamCase2(TestBase):\n def set_attrs(self):\n self.attrs = {\n \"optimizer\": 'adam',\n \"weight_decay\": 0.0,\n \"loss_scaling\": 4.0,\n }\n\n\[email protected]('seems 
cpu output wrong')\nclass TestLambCase1(TestBase):\n def set_attrs(self):\n self.attrs = {\n \"optimizer\": 'lamb',\n \"weight_decay\": 0.0,\n \"loss_scaling\": 5.0,\n }\n\n\[email protected]('seems cpu output wrong')\nclass TestLamb(TestBase):\n def set_attrs(self):\n self.attrs = {\n \"optimizer\": 'lamb',\n \"weight_decay\": 0.1,\n \"loss_scaling\": 6.0,\n }\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.random.uniform", "numpy.array", "numpy.allclose", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vdogmcgee/Machine-Learning-Demo
[ "5c8cddf4ba397bcf2dab0cfc942f5d1223d7dfed" ]
[ "logisitc_regression_demo.py" ]
[ "# -*- encoding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\n\n\ndef create_data():\n iris = load_iris()\n df = pd.DataFrame(iris.data, columns=iris.feature_names)\n df['label'] = iris.target\n # 花萼长度, 花萼宽度, 花瓣长度, 花瓣宽度, 分类标签\n df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']\n # 只选取前100条数据, 只有0,1两类, 0表示山鸢尾, 1表示变色鸢尾\n data = np.array(df.iloc[:100, [0,1,-1]])\n # 只选取前两列特征(花萼长度, 花萼宽度)\n return data[:,:2], data[:,-1]\n\n\nclass LRClassifier:\n def __init__(self, epochs=200, lr=0.01):\n self.epochs = epochs\n self.lr = lr\n \n def linear(self, x):\n return np.dot(x, self.w)\n \n def sigmoid(self, x):\n return 1 / (1 + np.exp(-x))\n \n def fit(self, X, y):\n # 增广特征向量\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n # 增广权重向量\n self.w = np.zeros((X.shape[1], 1))\n # y增加一维\n y = np.expand_dims(y, axis=1)\n # 训练\n for epoch in range(self.epochs):\n z = self.linear(X)\n y_pred = self.sigmoid(z)\n error = y_pred - y\n self.w = self.w - self.lr * np.dot(X.T, error)\n print(f'loss: {np.abs(error.T.sum()) / X.shape[0]}')\n \n \nif __name__ == '__main__': \n \n # 数据准备和划分\n X, y = create_data()\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) \n \n # 查看数据\n PLOT = False\n if PLOT:\n plt.scatter(X[:50,0],X[:50,1], label='0')\n plt.scatter(X[50:,0],X[50:,1], label='1')\n plt.xlabel(u'花萼长度(cm)', fontproperties='SimHei', color='red')\n plt.ylabel(u'花萼宽度(cm)', fontproperties='SimHei', color='red')\n plt.legend()\n plt.show()\n else:\n # 定义模型\n model = LRClassifier()\n # 训练\n model.fit(X_train, y_train)\n # 画出分类决策边界\n x_ponits = np.arange(4, 8)\n # 分类平面为 w1 * x1 + w2 * x2 + w0 = 0, 而图中是w2关于w1的函数, 转化为一条直线\n y_ = - (model.w[1] * x_ponits + model.w[0]) / model.w[2]\n plt.plot(x_ponits, y_, color='green')\n plt.scatter(X[:50,0],X[:50,1], label='0')\n plt.scatter(X[50:,0],X[50:,1], label='1')\n 
plt.xlabel(u'花萼长度(cm)', fontproperties='SimHei', color='red')\n plt.ylabel(u'花萼宽度(cm)', fontproperties='SimHei', color='red')\n plt.legend()\n plt.show()\n " ]
[ [ "numpy.dot", "matplotlib.pyplot.legend", "numpy.expand_dims", "matplotlib.pyplot.scatter", "numpy.arange", "sklearn.datasets.load_iris", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "numpy.ones", "matplotlib.pyplot.plot", "numpy.exp", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
scheckmedia/photils-api
[ "c1f06608573221cf76b63303126c44f5c0242353" ]
[ "api/autotagger.py" ]
[ "from flask import Blueprint, request, jsonify, current_app\nfrom .utils import ApiException\nimport base64\nimport numpy as np\n\napi = Blueprint('auto_tagger_api', 'auto_tagger_api')\n\n\[email protected]('/tags', methods=['POST'])\ndef get_tags_by_feature():\n tagger = current_app.tagger\n data = request.get_json()\n key = None\n allowed_keys = ['feature', 'features', 'image']\n\n for k in data.keys():\n if k in allowed_keys:\n key = k\n break\n\n if key is None:\n raise ApiException(\"invalid feature parameter\", 400)\n\n if key == 'feature':\n if not isinstance(data[key], list):\n feature = np.frombuffer(base64.decodebytes(str.encode(data[key])), dtype=np.float32)\n else:\n feature = np.array(data[key])\n\n if feature.shape[-1] != tagger.DIMENSIONS:\n raise ApiException(\"invalid dimension of feature vector\", 400)\n\n query = [np.array(feature)]\n elif key == 'features':\n if not len(data[key]):\n raise ApiException(\"empty request\", 400)\n\n features = []\n for feature in data[key]:\n if not isinstance(feature, list):\n feature = np.frombuffer(base64.decodebytes(str.encode(feature)), dtype=np.float32)\n else:\n feature = np.array(feature)\n\n if feature.shape[-1] != tagger.DIMENSIONS:\n raise ApiException(\"invalid dimension of feature vector\", 400)\n\n features += [feature]\n\n query = np.array(features)\n else:\n query = [tagger.get_feature(data['image'])]\n\n recommended_tags = tagger.get_tags(query)\n\n\n return jsonify({'tags': recommended_tags, 'success': True})\n\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iydon/homework
[ "253d4746528ef62d33eba1de0b90dcb17ec587ed" ]
[ "DCC/1/main.py" ]
[ "#!/usr/bin/python3\n'''\n@Reference: https://github.com/wenj18/HW_TV3d\n'''\nimport collections\nimport numpy as np\n\n\nclass Worker:\n '''\n '''\n def __init__(self, I, dt=1e-1, lam=1e-2, eps=1e-4):\n '''\n '''\n self.I = I\n self._dt = dt\n self._lam = lam\n self._eps = eps\n self._size = I.shape\n\n def next(self, J, number=8):\n row, col, channel = self._size\n for ith in range(number):\n start = ith*row // number - 1\n end = (ith+1)*row // number + 1\n if start < 0:\n start += 1\n DfJx = J[(start+1):(end+1), :, :] - J[start:end, :, :]\n elif end > row:\n end -= 1\n DfJx = J[list(range(start+1, end))+[end-1], :, :] - J[start:end, :, :]\n else:\n DfJx = J[(start+1):(end+1), :, :] - J[start:end, :, :]\n DfJy = J[start:end, list(range(1, col))+[col-1], :] - J[start:end, :, :]\n DfJz = J[start:end, :, list(range(1, channel))+[channel-1]] - J[start:end, :, :]\n TempDJ = (self._eps + DfJx*DfJx + DfJy*DfJy + DfJz*DfJz) ** (1/2)\n DivJx = DfJx / TempDJ\n DivJy = DfJy / TempDJ\n DivJz = DfJz / TempDJ\n mi, ni, li = DivJx.shape\n if start == 0:\n div = DivJx[0:(mi-1), :, :] - DivJx[[0]+list(range(mi-2)), :, :] \\\n + DivJy[0:(mi-1), :, :] - DivJy[0:(mi-1), [0]+list(range(ni-1)), :] \\\n + DivJz[0:(mi-1), :, :] - DivJz[0:(mi-1), :, [0]+list(range(li-1))]\n J[start:(end-1), :, :] += self._dt*div \\\n - self._dt * self._lam * (J[start:(end-1), :, :]-self.I[start:(end-1), :, :])\n elif end == row:\n mi += 1\n end += 1\n div = DivJx[1:(mi-1), :, :] - DivJx[0:(mi-2), :, :] \\\n + DivJy[1:(mi-1), :, :] - DivJy[1:(mi-1), [0]+list(range(ni-1)), :] \\\n + DivJz[1:(mi-1), :, :] - DivJz[1:(mi-1), :, [0]+list(range(li-1))]\n J[(start+1):(end-1), :, :] += self._dt*div \\\n - self._dt * self._lam * (J[(start+1):(end-1), :, :]-self.I[(start+1):(end-1), :, :])\n else:\n div = DivJx[1:(mi-1), :, :] - DivJx[0:(mi-2), :, :] \\\n + DivJy[1:(mi-1), :, :] - DivJy[1:(mi-1), [0]+list(range(ni-1)), :]\\\n + DivJz[1:(mi-1), :, :] - DivJz[1:(mi-1), :, [0]+list(range(li-1))]\n 
J[(start+1):(end-1), :, :] += self._dt*div \\\n - self._dt * self._lam * (J[(start+1):(end-1), :, :]-self.I[(start+1):(end-1), :, :])\n return J\n\n @classmethod\n def split(cls, total, number):\n total = tuple(total)\n length = len(total)\n start = 0\n for ith in range(number):\n end = (ith+1) * length // number\n yield total[start:end]\n start = end\n\n @classmethod\n def split_images(cls, Is, number):\n *_, total = Is.shape\n for idx in cls.split(range(total), number):\n yield Is[:, :, idx]\n\n\ndef default_image(nx=200, ny=200, nz=200, mean=0, sigma=12):\n I = 100 * np.ones((nx, ny, nz), dtype='float64')\n f = lambda ratio1, ratio2, number: slice(int(number*ratio1), int(number*ratio2))\n I[f(0.5, 0.75, nx), f(0.5, 0.75, ny), f(0.5, 0.75, nz)] = 150.0\n return I + np.random.normal(mean, sigma, (nx, ny, nz))\n\n\nif __name__ == '__main__':\n from mpi4py import MPI\n import matplotlib.pyplot as plt\n\n T = 100\n comm = MPI.COMM_WORLD\n size = comm.Get_size()\n rank = comm.Get_rank()\n number = [None, 240, 120, 80, 60][size]\n nx, ny, nz = 200, 200, size*number\n\n time = MPI.Wtime()\n\n # Split images\n if rank == 0:\n I = default_image(nx, ny, nz)\n for ith, J in enumerate(Worker.split_images(I, size)):\n if ith == 0:\n origin = J\n else:\n comm.Send(J.copy(), dest=ith, tag=10)\n else:\n origin = np.empty((nx, ny, number), dtype='float64')\n comm.Recv(origin, source=0, tag=10)\n plt.figure()\n plt.imshow(origin[:, 100, :], 'gray')\n plt.savefig(f'result/{size}/noised-{rank}.png')\n\n # Denoising\n J = origin.copy()\n w = Worker(origin)\n *_, channel = J.shape\n for t in range(T):\n if rank == 0 and not t%5:\n print(t, 'out of', T)\n J = w.next(J)\n\n if rank != size-1:\n sendbuf = J[:, :, channel-1].copy()\n comm.Send(sendbuf, dest=rank+1, tag=20)\n if rank != 0:\n recbuf = np.empty((nx, ny), dtype='float64')\n comm.Recv(recbuf, source=rank-1, tag=20)\n J[:, :, 0] = recbuf\n\n # Combine images\n if rank == 0:\n result = np.empty((nx, ny, nz), dtype='float64')\n 
for ith, val in enumerate(Worker.split(range(nz), size)):\n if ith == 0:\n result[:, :, val] = J\n else:\n recbuf = np.empty((nx, ny, number), 'float64')\n comm.Recv(recbuf, source=ith, tag=30)\n result[:, :, val] = recbuf\n plt.figure()\n plt.imshow(result[:, 100, :], 'gray')\n plt.savefig(f'result/{size}/result.png')\n else:\n sendbuf = J.copy()\n comm.Send(sendbuf, dest=0, tag=30)\n\n time = MPI.Wtime() - time\n print(rank, time)\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.savefig", "numpy.ones", "numpy.random.normal", "numpy.empty", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Alpus/text
[ "41413d193fd502e8a0754ac4b60b4c645bdf9ac7" ]
[ "tensorflow_text/public_names_test.py" ]
[ "# coding=utf-8\n# Copyright 2019 TF.Text Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test that the expected symbols are made public by tensorflow_text.\n\nEach public module should have an _allowed_symbols attribute, listing the\npublic symbols for that module; and that list should match the actual list\nof public symbols in that module.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport types\n\nimport tensorflow_text as tensorflow_text\nfrom tensorflow.python.platform import test\n\n\nclass PublicNamesTest(test.TestCase):\n\n def check_names(self, module, prefix=\"tf_text.\"):\n self.assertTrue(\n hasattr(module, \"_allowed_symbols\"),\n \"Expected to find _allowed_symbols in %s\" % prefix)\n\n actual_symbols = set(\n name for name in module.__dict__ if not name.startswith(\"_\"))\n missing_names = set(module._allowed_symbols) - set(actual_symbols)\n extra_names = set(actual_symbols) - set(module._allowed_symbols)\n\n self.assertEqual(extra_names, set(),\n \"Unexpected symbol(s) exported by %s\" % prefix)\n self.assertEqual(missing_names, set(),\n \"Missing expected symbol(s) in %s\" % prefix)\n\n for (name, value) in module.__dict__.items():\n if isinstance(value, types.ModuleType) and not name.startswith(\"_\"):\n self.check_names(value, prefix + name + \".\")\n\n def testPublicNames(self):\n self.check_names(tensorflow_text)\n\n\nif __name__ == 
\"__main__\":\n test.main()\n" ]
[ [ "tensorflow.python.platform.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
venkateshIedge7/Streamlit_Stock
[ "988376d853f07dc4aa031cad170f5a84e490e3f2" ]
[ "reddit_extraction.py" ]
[ "# Reddit date time is converted into readable date time \r\n\r\nimport pandas as pd\r\n\r\nimport praw\r\nfrom praw.models import MoreComments\r\n\r\nimport datetime as dt\r\nimport reddit_config as r_cnf\r\nimport streamlit as st\r\n\r\n \r\n \r\ndef get_date(created):\r\n return dt.datetime.fromtimestamp(created)\r\n \r\n \r\n #Function is used to go into induvidual posts and extract the entire comment tree \r\n \r\ndef extract_comm_tree_to_df_ramp(id2): #Input a ID of string; returns a df of \r\n import reddit_config as r_cnf # setup your config page - for username and password for your respective reddit account\r\n #Setting up a reddit model\r\n try:\r\n reddit = praw.Reddit(client_id='AVu7k513AHb_DSBUp3GPPg',\\\r\n client_secret='YtLs8QTniQRUZHxjCL1_uUsdvMPiyA', \\\r\n user_agent='CABD',\\\r\n username= r_cnf.reddit['accessCode'] ,\\\r\n password= r_cnf.reddit['secretCode'] )\r\n except:\r\n print(\"Error in accessing redit env\")\r\n \r\n post = reddit.submission(id=id2)\r\n Subreddit_com_dict = {\r\n \"score\":[],\\\r\n \"id\":[],\\\r\n \"created\": [],\\\r\n \"com_body\":[],\\\r\n \"comm_tier1\":[],\\\r\n \"comm_tier2\":[]\r\n }\r\n post.comments.replace_more(limit=0)\r\n comments = post.comments.list()\r\n \r\n for comment in post.comments.list():\r\n if isinstance(comment, MoreComments):\r\n continue\r\n Subreddit_com_dict[\"score\"].append(comment.score)\r\n Subreddit_com_dict[\"id\"].append(comment.id)\r\n Subreddit_com_dict[\"created\"].append(comment.created)\r\n Subreddit_com_dict[\"com_body\"].append(comment.body)\r\n for reply in comment.replies:\r\n if isinstance(reply, MoreComments):\r\n continue\r\n Subreddit_com_dict[\"comm_tier1\"].append(reply.body)\r\n for reply2 in reply.replies:\r\n if isinstance(reply2, MoreComments):\r\n continue\r\n Subreddit_com_dict[\"comm_tier2\"].append(reply2.body)\r\n \r\n Subreddit_com_data = pd.DataFrame.from_dict(Subreddit_com_dict, orient='index').transpose()\r\n _timestamp = 
Subreddit_com_data[\"created\"].apply(get_date)\r\n Subreddit_com_data = Subreddit_com_data.assign(timestamp = _timestamp)\r\n Subreddit_com_data_1 = Subreddit_com_data.drop(['created'], axis=1)\r\n return Subreddit_com_data_1 \r\n \r\n \r\n #Function calls the comment tree extraction function in each of the Posts inside a subreddit: \r\n \r\ndef extract_reddit_post_com_rep_ramp(subreddit_name,n): # Subreddit_name = String \r\n # setup your config page - for username and password for your respective reddit account\r\n \r\n #Setting up a reddit model\r\n reddit = praw.Reddit(client_id='AVu7k513AHb_DSBUp3GPPg', \\\r\n client_secret='YtLs8QTniQRUZHxjCL1_uUsdvMPiyA', \\\r\n user_agent='CABD', \\\r\n username= r_cnf.reddit['accessCode'] , \\\r\n password= r_cnf.reddit['secretCode'] )\r\n try:\r\n GME_subreddit = reddit.subreddit(subreddit_name)\r\n except:\r\n print(\"Error in passing subreddit_name value\")\r\n \r\n top_subreddit = GME_subreddit.top(limit=1000)\r\n Subreddit_dict = { \"title\":[],\\\r\n \"score\":[],\\\r\n \"id\":[],\\\r\n \"url\":[],\\\r\n \"comms_num\": [],\\\r\n \"created\": [],\\\r\n \"body\":[]}\r\n for submission in top_subreddit:\r\n Subreddit_dict[\"title\"].append(submission.title)\r\n Subreddit_dict[\"score\"].append(submission.score)\r\n Subreddit_dict[\"id\"].append(submission.id)\r\n Subreddit_dict[\"url\"].append(submission.url)\r\n Subreddit_dict[\"comms_num\"].append(submission.num_comments)\r\n Subreddit_dict[\"created\"].append(submission.created)\r\n Subreddit_dict[\"body\"].append(submission.selftext)\r\n \r\n Subreddit_data = pd.DataFrame(Subreddit_dict)\r\n Subreddit_top_com_id = { \"id\":[] }\r\n Top_comm_posts = Subreddit_data['comms_num'].nlargest(n=n)\r\n for index in Top_comm_posts.index:\r\n Subreddit_top_com_id[\"id\"].append(Subreddit_data.iloc[index]['id'])\r\n \r\n def get_date(created):\r\n return dt.datetime.fromtimestamp(created)\r\n \r\n _timestamp = Subreddit_data[\"created\"].apply(get_date)\r\n Subreddit_data = 
Subreddit_data.assign(timestamp = _timestamp)\r\n Subreddit_data = Subreddit_data.drop(['created'], axis=1)\r\n # Top comment containeing reddit post's ID have been obtained \r\n # Now to extract the 2 tier comment tree of these posts\r\n All_Data_Com = pd.DataFrame([])\r\n my_bar = st.progress(0)\r\n\r\n for I in range(n):\r\n print(\"Current post bieng scraped is %(post)d\" % {\"post\":I})\r\n \r\n i = I\r\n q = (i+1)/n\r\n percent = q\r\n my_bar.progress(percent)\r\n \r\n Data = extract_comm_tree_to_df_ramp(Subreddit_data['id'][I])\r\n \r\n All_Data_Com = All_Data_Com.append(Data)\r\n \r\n \r\n return Subreddit_data,All_Data_Com; " ]
[ [ "pandas.DataFrame", "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
LaudateCorpus1/Bella-5
[ "7de51ff4914bdefbcf05e490b85517c5fb014595" ]
[ "bella/word_vectors.py" ]
[ "'''\nContains classes that train and/or load semantic vectors. All classes are sub\nclasses of WordVectors\n\nClasses:\n\n1. WordVectors - Base class of all classes within this module. Ensures\nconsistent API for all word vectors classes.\n2. GensimVectors - Creates `Word2Vec <https://arxiv.org/pdf/1301.3781.pdf>`_\nand `FastText <https://arxiv.org/abs/1607.04606>`_ vectors.\n3. PreTrained - Creates a Wordembedding for those that are stored in TSV files\nwhere the first item in the line is the word and the rest of the tab sep values\nare its vector representation. Currently loads the Tang et al. vectors\n`from <https://github.com/bluemonk482/tdparse/tree/master/resources/wordemb/sswe>`_\n'''\n\nfrom collections import defaultdict\nimport os\nimport math\nimport types\nimport tempfile\nimport zipfile\nfrom pathlib import Path\n\nimport numpy as np\nimport requests\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom gensim.models import word2vec\nfrom gensim.models.wrappers import FastText\nfrom gensim.scripts.glove2word2vec import glove2word2vec\nfrom tqdm import tqdm\n\nBELLA_VEC_DIR = Path.home().joinpath('.Bella', 'Vectors')\n\n\nclass WordVectors(object):\n '''\n Base class for all WordVector classes. Contains the following instance\n attributes:\n\n 1. vector_size - Size of the word vectors e.g. 100\n 2. index2word - Mapping between index number and associated word\n 3. index2vector - mapping between index and vector\n 4. word2index - Mapping between word and associated index\n 5. name - This is used to identify the model when reading cross validation \\\n results from models, used mainly for debugging. Default is None. Used\n 6. unknown_vector - The vector that is returned for any unknwon words and \\\n for the index=0.\n 7. unknown_word - The word that is returned for the 0 index. Default is \\\n `<unk>`\n 8. unit_length - If the vectors when returned are their unit norm value \\\n instead of their raw values.\n 9. 
unknown_index - The index of the unknown word normally 0\n 10. padding_word - The word (<pad>) that defines padding indexs.\n 11. padding_index - index of the padding word\n 12. padding vector - padding vector for the padding word.\n\n padding index, word and vector are equal to the unknown equilavents if\n padding_value = None in the constructor. Else padding index = 0, word = <pad>\n and vector is what you have defined, this then moves the unknown index to\n vocab size + 1 and the everything else is the same. The idea behind the 2\n are that pad is used for padding and unknown is used for words that are\n truely unknown therefore allowing you to only skip the pad vectors when\n training a model by using a masking function in keras.\n\n The index 0 is used as a special index to map to unknown words. Therefore \\\n the size of the vocabularly is len(index2word) - 1.\n\n Following methods:\n\n 1. :py:func:`bella.word_vectors.WordVectors.lookup_vector`\n '''\n def __init__(self, word2vector, name=None, unit_length=False,\n padding_value=None, filter_words=None):\n self.filter_words = [] if filter_words is None else filter_words\n size_vector_list = self._check_vector_size(word2vector)\n self.vector_size, self._word2vector, self._word_list = size_vector_list\n self.name = '{}'.format(name)\n self.unit_length = unit_length\n # Method attributes\n self.unknown_word = self._unknown_word()\n self.unknown_vector = self._unknown_vector()\n self.unknown_index = 0\n self.padding_word = self.unknown_word\n self.padding_vector = None\n if padding_value is not None:\n self.unknown_index = len(self._word_list) + 1\n self.padding_vector = np.asarray([padding_value] *\n self.vector_size)\n self.padding_word = '<pad>'\n else:\n self.padding_vector = self.unknown_vector\n if self.padding_vector is None:\n raise ValueError('Padding Vector is None')\n self.index2word = self._index2word()\n self.word2index = self._word2index()\n self.index2vector = self._index2vector()\n 
self.embedding_matrix = self._embedding_matrix()\n\n def _keyed_vec_2_dict(self, key_vector):\n word_2_vec_dict = {}\n for word in key_vector.vocab:\n word_2_vec_dict[word] = key_vector[word]\n return word_2_vec_dict\n\n def _check_vector_size(self, word2vector):\n '''\n This finds the most common vector size in the word 2 vectors dictionary\n mapping. Normally they should all have the same mapping but it has been\n found that their could be some mistakes in pre-compiled word vectors\n therefore this function removes all words and vectors that do not conform\n to the majority vector size. An example of this would be if all the\n word to vector mappings are on dimension 50 but one word that one word\n would be removed from the dictionary mapping and not included in the\n word list returned.\n\n :param word2vector: A dictionary containing words as keys and their \\\n associated vector representation as values.\n :type word2vector: dict or gensim.models.keyedvectors.KeyedVectors\n :returns: A tuple of length 3 containg 1. The dimension of the vectors, \\\n 2. The dictionary of word to vectors, 3. 
The list of words in the dictionary\n :rtype: tuple\n '''\n # Gensim does not used a dictionary but a special class\n if isinstance(word2vector, KeyedVectors):\n word2vector = self._keyed_vec_2_dict(word2vector)\n #accepted_words = word2vector.index2word\n #most_likely_size = word2vector[accepted_words[0]].shape[0]\n #return most_likely_size, word2vector, accepted_words\n vector_sizes = {}\n for _, vector in word2vector.items():\n vector_size = vector.shape[0]\n vector_sizes[vector_size] = vector_sizes.get(vector_size, 0) + 1\n most_likely_size = sorted(vector_sizes.items(), reverse=True,\n key=lambda size_freq: size_freq[0])[0][0]\n words_to_remove = []\n unk_word = self._unknown_word()\n accepted_words = []\n for word, vector in word2vector.items():\n if vector.shape[0] != most_likely_size:\n words_to_remove.append(word)\n elif self.filter_words != []:\n if word not in self.filter_words and word != unk_word:\n words_to_remove.append(word)\n else:\n accepted_words.append(word)\n else:\n accepted_words.append(word)\n for word in words_to_remove:\n del word2vector[word]\n return most_likely_size, word2vector, accepted_words\n\n @staticmethod\n def glove_txt_binary(glove_file_path: Path):\n '''\n Converts the Glove word embedding file which is a text file to a\n binary file that can be loaded through gensims\n KeyedVectors.load_word2vec_format method and deletes the text file\n version and returns the file path to the new binary file.\n\n :param glove_file_path: File path to the downloaded glove vector text \\\n file.\n :type glove_file_path: String\n :returns: The file path to the binary file version of the glove vector\n :rtype: String\n '''\n # File path to the binary file\n binary_file_path = os.path.splitext(glove_file_path.name)\n binary_file_path = binary_file_path[0]\n binary_file_path += '.binary'\n binary_file_path = glove_file_path.parent.joinpath(binary_file_path)\n if binary_file_path.is_file():\n return str(binary_file_path.resolve())\n with 
tempfile.NamedTemporaryFile('w', encoding='utf-8') as temp_file:\n # Converts to word2vec file format\n print('Converting word vectors file from text to binary for '\n 'quicker load time')\n glove2word2vec(str(glove_file_path.resolve()), temp_file.name)\n word_vectors = KeyedVectors.load_word2vec_format(temp_file.name,\n binary=False)\n # Creates the binary file version of the word vectors\n binary_file_path = str(binary_file_path.resolve())\n word_vectors.save_word2vec_format(binary_file_path, binary=True)\n # Delete the text version of the glove vectors\n glove_file_path.unlink()\n return binary_file_path\n\n def lookup_vector(self, word):\n '''\n Given a word returns the vector representation of that word. If the model\n does not have a representation for that word it returns word vectors\n unknown word vector (most models this is zeors)\n\n :param word: A word\n :type word: String\n :returns: The word vector for that word. If no word vector can be found\n returns a vector of zeros.\n :rtype: numpy.ndarray\n '''\n\n if isinstance(word, str):\n word_index = self.word2index[word]\n return self.index2vector[word_index]\n raise ValueError('The word parameter must be of type str not '\n f'{type(word)}')\n\n def _index2word(self):\n '''\n The index starts at one as zero is a special value assigned to words that\n are padded. 
The word return for zero is defined by `padded_word` attribute.\n If the padded value is different to the unknown word value then the index\n will contain an extra index for the unknown word index which is the\n vocab size + 1 index.\n\n :returns: A dictionary matching word indexs to there corresponding words.\n Inverse of :py:func:`bella.word_vectors.GensimVectors.word2index`\n :rtype: dict\n '''\n\n index_word = {}\n index_word[0] = self.padding_word\n index = 1\n for word in self._word_list:\n if word == self.unknown_word:\n continue\n index_word[index] = word\n index += 1\n # Required as the unknown word might have been learned and in\n # self._word_list\n if self.unknown_index != 0:\n self.unknown_index = len(index_word)\n index_word[self.unknown_index] = self.unknown_word\n return index_word\n\n def _return_unknown_index(self):\n '''\n :returns: zero. Used as lambda is not pickleable\n :rtype: int\n '''\n return self.unknown_index\n\n def _word2index(self):\n '''\n If you have specified a special padded index vector then the <pad> word\n would match to index 0 and the vocab + 1 index will be <unk> else if\n no special pad index vector then vocab + 1 won't exist and <unk> will be\n 0\n\n :returns: A dictionary matching words to there corresponding index.\n Inverse of :py:func:`bella.word_vectors.GensimVectors.index2word`\n :rtype: dict\n '''\n\n # Cannot use lambda function as it cannot be pickled\n word2index_dict = defaultdict(self._return_unknown_index)\n for index, word in self.index2word.items():\n word2index_dict[word] = index\n return word2index_dict\n\n def _index2vector(self):\n '''\n NOTE: the zero index is mapped to the unknown index unless padded vector\n is specified then zero index is padded index and vocab + 1 index is\n unknown index\n\n :returns: A dictionary of word index to corresponding word vector. 
Same\n as :py:func:`bella.word_vectors.GensimVectors.lookup_vector` but\n instead of words that are looked up it is the words index.\n :rtype: dict\n '''\n\n def unit_norm(vector):\n '''\n :param vector: A 1-dimension vector\n :type vector: numpy.ndarray\n :returns: The vector normalised to it's unit vector.\n :rtype: numpy.ndarray\n '''\n\n # Check it is not a zero vector\n if np.array_equal(np.zeros(self.vector_size), vector):\n return vector\n l2_norm = np.linalg.norm(vector)\n return vector / l2_norm\n\n index_vector = {}\n if self.unit_length:\n index_vector[0] = unit_norm(self.padding_vector)\n index_vector[self.unknown_index] = unit_norm(self.unknown_vector)\n else:\n index_vector[0] = self.padding_vector\n index_vector[self.unknown_index] = self.unknown_vector\n for index, word in self.index2word.items():\n if index == 0 or index == self.unknown_index:\n continue\n if self.unit_length:\n index_vector[index] = unit_norm(self._word2vector[word])\n else:\n index_vector[index] = self._word2vector[word]\n return index_vector\n\n def __repr__(self):\n return self.name\n\n def _unknown_vector(self):\n '''\n This is to be Overridden by sub classes if they want to return a custom\n unknown vector.\n\n :returns: A vector for all unknown words. In this case it is a zero\n vector.\n :rtype: numpy.ndarray\n '''\n\n return np.zeros(self.vector_size)\n\n def _unknown_word(self):\n '''\n :returns: The word that is returned for the 0 index.\n :rtype: String\n '''\n\n return '<unk>'\n\n def _embedding_matrix(self):\n '''\n The embedding matrix that can be used in Keras embedding layer as the\n weights. 
It is very much a simple lookup where the key is the word index.\n\n :retunrs: The embedding matrix of dimension (vocab_size + 1, vector_size) \\\n where the vocab size is + 1 due to the unknown vector.\n :rtype: numpy.ndarray\n '''\n matrix = np.zeros((len(self.index2vector), self.vector_size),\n dtype=np.float32)\n for index, vector in self.index2vector.items():\n try:\n matrix[index] = vector\n except Exception as e:\n word = self.index2word[index]\n print(f'{word} {index} {vector} {e}')\n return matrix\n\n\nclass GensimVectors(WordVectors):\n '''\n Class that can create one of the following word vector models:\n\n 1. `Word2Vec <https://radimrehurek.com/gensim/models/word2vec.html>`_.\n 2. `Fasttext <https://radimrehurek.com/gensim/models/wrappers/fasttext.html>`_\n\n private attributes:\n\n 1. self._model = Gensim instance of the chosen model e.g. if word2vec\n was chosen then it would be `gensim.models.word2vec.Word2Vec`\n '''\n\n def __init__(self, file_path, train_data, name=None, model=None,\n unit_length=False, padding_value=None,\n filter_words=None, **kwargs):\n '''\n Trains or loads the model specified.\n\n :param file_path: Path to the saved model or Path to save the model\n once trained. Can be None if you only want to train.\n :param train_data: An object like a list that can be iterated that contains\n tokenised text, to train the model e.g. [['hello', 'how'], ['another']].\n Not required if `file_path` contains a trained model.\n :param model: The name of the model\n :param name: The name of given to the instance.\n :param unit_length: If the word vectors should be normalised to unit \\\n vectors\n :param kwargs: The keyword arguments to give to the Gensim Model that is \\\n being used i.e. keyword argument to `Word2Vec <https://radimrehurek.com/\\\n gensim/models/word2vec.html#gensim.models.word2vec.Word2Vec>`_\n :type file_path: String\n :type train_data: iterable object e.g. 
list\n :type model: String\n :type name: String Default None\n :type unit_length: bool. Default False.\n :type kwargs: dict\n '''\n\n allowed_models = {'word2vec': word2vec.Word2Vec,\n 'fasttext': FastText}\n if model not in allowed_models:\n raise ValueError('model parameter has to be one of the following '\n f'{allowed_models.keys()} not {model}')\n model = allowed_models[model]\n failed_to_load = True\n\n if isinstance(file_path, str):\n file_path = os.path.abspath(file_path)\n\n if isinstance(file_path, str):\n if os.path.isfile(file_path):\n try:\n self._model = model.load(file_path)\n failed_to_load = False\n except EOFError:\n failed_to_load = True\n\n if hasattr(train_data, '__iter__') and failed_to_load:\n # Generators throws an error in Gensim\n if isinstance(train_data, types.GeneratorType):\n train_data = map(lambda x: x, train_data)\n self._model = model(train_data, **kwargs)\n if isinstance(file_path, str):\n self._model.save(file_path)\n print('{} model has been saved to {}'.format(model.__name__,\n file_path))\n elif failed_to_load:\n raise Exception('Cannot create model as there is no path to extract '\\\n 'a model from {} or any data to train on which has '\\\n 'to have the __iter__ function {}'\\\n .format(file_path, train_data))\n super().__init__(self._model.wv, name=name, unit_length=unit_length,\n padding_value=padding_value,\n filter_words=filter_words)\n\n\nclass VoVectors(GensimVectors):\n\n def download(self, skip_conf):\n vo_folder = BELLA_VEC_DIR.joinpath('Vo Word Vectors')\n vo_folder.mkdir(parents=True, exist_ok=True)\n current_vector_files = set([vo_file.name for vo_file\n in vo_folder.iterdir()])\n all_vector_files = set(['c10_w3_s100',\n 'c10_w3_s100.syn0.npy',\n 'c10_w3_s100.syn1.npy'])\n interset = all_vector_files.intersection(current_vector_files)\n # If the files in the folder aren't all the glove files that would be\n # downloaded re-download the zip and unzip the files.\n if interset != all_vector_files:\n can_download = 
'yes'\n if not skip_conf:\n download_msg = 'We are going to download the Vo Word vectors '\\\n 'this is a download of 120MB '\\\n 'Would you like to continue? If so type '\\\n '`yes`\\n>> '\n can_download = input(download_msg)\n\n if can_download.strip().lower() == 'yes':\n base_url = 'https://github.com/bluemonk482/tdparse/raw/'\\\n 'master/resources/wordemb/w2v/'\n link_locations = [(f'{base_url}c10_w3_s100',\n vo_folder.joinpath('c10_w3_s100')),\n (f'{base_url}c10_w3_s100.syn0.npy',\n Path(vo_folder, 'c10_w3_s100.syn0.npy')),\n (f'{base_url}c10_w3_s100.syn1.npy',\n Path(vo_folder, 'c10_w3_s100.syn1.npy'))]\n print('Downloading Vo vectors')\n for download_link, file_location in link_locations:\n # Reference:\n # http://docs.python-requests.org/en/master/user/quickstart/#raw-response-content\n with file_location.open('wb') as vo_file:\n request = requests.get(download_link, stream=True)\n total_size = int(request.headers.get('content-length',\n 0))\n for chunk in tqdm(request.iter_content(chunk_size=128),\n total=math.ceil(total_size//128)):\n vo_file.write(chunk)\n else:\n raise Exception('Vo vectors not downloaded therefore '\n 'cannot load them')\n return str(vo_folder.joinpath('c10_w3_s100').resolve())\n\n def __init__(self, name=None, unit_length=False,\n padding_value=None, skip_conf=False,\n filter_words=None):\n vector_file = self.download(skip_conf)\n\n if name is None:\n name = 'w2v'\n super().__init__(vector_file, train_data=None, name=name,\n model='word2vec', unit_length=unit_length,\n padding_value=padding_value,\n filter_words=filter_words)\n\n\n\nclass PreTrained(WordVectors):\n '''\n Class that loads word vectors that have been pre-trained.\n\n All pre-trained word vectors have to follow the following conditions:\n\n 1. New word vector on each line\n 2. Each line is tab seperated (by default but a tab is just delimenter \\\n which can be changed by setting delimenter argument in the constructor)\n 3. 
The first tab sperated value on the line is the word\n 4. The rest of the tab seperated values on that line represent the values\n for the associtaed word.\n '''\n\n def __init__(self, file_path, name=None, unit_length=False,\n delimenter='\\t', padding_value=None, filter_words=None):\n '''\n :param file_path: The file path to load the word vectors from\n :param name: The name given to the instance.\n :param unit_length: If the word vectors should be normalised to unit \\\n vectors\n :param delimenter: The value to be used to split the values in each line \\\n of the word vectors.\n :type file_path: String\n :type name: String Default None\n :type unit_length: bool. Default False\n :type delimenter: String. Default `\\t`\n '''\n if not isinstance(file_path, str):\n raise TypeError('The type of the file path should be str not {}'\\\n .format(type(file_path)))\n file_path = os.path.abspath(file_path)\n if not os.path.isfile(file_path):\n raise ValueError('There is no file at file path {}'.format(file_path))\n\n word2vector = {}\n with open(file_path, 'r') as data_file:\n for org_line in data_file:\n line = org_line.strip()\n word_values = line.split(delimenter)\n word = word_values[0]\n # This attempts to remove words that have whitespaces in them\n # a sample of this problem can be found within the Glove\n # Common Crawl 840B vectors where \\[email protected] ==\n # [email protected] after strip is applied and they have different\n # vectors\n if word in word2vector:\n org_word = org_line.split(delimenter)[0]\n if word != org_word:\n continue\n else:\n del word2vector[word]\n word_vector = np.asarray(word_values[1:], dtype='float32')\n if word in word2vector:\n dict_vector = word2vector[word]\n raise KeyError('{} already has a vector in the word vector '\\\n 'dict. 
Vector in dict {} and alternative vector {}'\\\n .format(word, dict_vector, word_vector))\n else:\n word2vector[word] = word_vector\n super().__init__(word2vector, name=name, unit_length=unit_length,\n padding_value=padding_value,\n filter_words=filter_words)\n\n def _unknown_vector(self):\n '''\n Overrides. Instead of returnning zero vector it return the vector for\n the word `<unk>`.\n\n :returns: The vector for the word `<unk>`\n :rtype: numpy.ndarray\n '''\n\n return self._word2vector['<unk>']\n\n\nclass SSWE(PreTrained):\n\n def download(self, skip_conf):\n '''\n '''\n sswe_folder = BELLA_VEC_DIR.joinpath('SSWE')\n sswe_folder.mkdir(parents=True, exist_ok=True)\n sswe_fp = sswe_folder.joinpath('sswe')\n # If the files in the folder aren't all the SSWE files that would be\n # downloaded re-download the zip and unzip the files.\n if not sswe_fp.is_file():\n can_download = 'yes'\n if not skip_conf:\n download_msg = 'We are going to download the SSWE vectors '\\\n 'this is a download of 74MB '\\\n 'Would you like to continue? If so type '\\\n '`yes`\\n>> '\n can_download = input(download_msg)\n\n if can_download.strip().lower() == 'yes':\n download_link = 'https://github.com/bluemonk482/tdparse/raw/'\\\n 'master/resources/wordemb/sswe/sswe-u.txt'\n # Reference:\n # http://docs.python-requests.org/en/master/user/quickstart/#raw-response-content\n with sswe_fp.open('wb') as sswe_file:\n request = requests.get(download_link, stream=True)\n total_size = int(request.headers.get('content-length', 0))\n print('Downloading SSWE vectors')\n for chunk in tqdm(request.iter_content(chunk_size=128),\n total=math.ceil(total_size//128)):\n sswe_file.write(chunk)\n else:\n raise Exception('SSWE vectors not downloaded therefore '\n 'cannot load them')\n sswe_folder_files = list(sswe_folder.iterdir())\n if not sswe_fp.is_file():\n raise Exception('Error in either downloading the SSWE vectors'\n ' or file path names. 
Files in the SSWE '\n f'folder {sswe_folder_files} and where the '\n f'SSWE file should be {str(sswe_fp)}')\n return str(sswe_fp.resolve())\n\n def __init__(self, name=None, unit_length=False, skip_conf=False,\n padding_value=None, filter_words=None):\n\n vector_file = self.download(skip_conf)\n\n if name is None:\n name = 'sswe'\n super().__init__(vector_file, name=name, unit_length=unit_length,\n padding_value=padding_value,\n filter_words=filter_words)\n\n\n\nclass GloveTwitterVectors(WordVectors):\n\n def download(self, skip_conf):\n '''\n This method checks if the\n `Glove Twitter word vectors \\\n <https://nlp.stanford.edu/projects/glove/>`_\n are already in the repoistory if not it downloads and unzips the word\n vectors if permission is granted and converts them into a gensim\n KeyedVectors binary representation.\n\n :param skip_conf: Whether to skip the permission step as it requires \\\n user input. True to skip permission.\n :type skip_conf: bool\n :returns: A dict containing word vector dimension as keys and the \\\n absolute path to the vector file.\n :rtype: dict\n '''\n\n glove_folder = BELLA_VEC_DIR.joinpath('glove_twitter')\n glove_folder.mkdir(parents=True, exist_ok=True)\n current_glove_files = set([glove_file.name for glove_file\n in glove_folder.iterdir()])\n all_glove_files = set(['glove.twitter.27B.25d.binary',\n 'glove.twitter.27B.50d.binary',\n 'glove.twitter.27B.100d.binary',\n 'glove.twitter.27B.200d.binary'])\n interset = all_glove_files.intersection(current_glove_files)\n # If the files in the folder aren't all the glove files that would be\n # downloaded re-download the zip and unzip the files.\n if interset != all_glove_files:\n can_download = 'yes'\n if not skip_conf:\n download_msg = 'We are going to download the glove vectors '\\\n 'this is a large download of 1.4GB and takes '\\\n '5.4GB of disk space after being unzipped. '\\\n 'Would you like to continue? 
If so type '\\\n '`yes`\\n>> '\n can_download = input(download_msg)\n\n if can_download.strip().lower() == 'yes':\n download_link = 'http://nlp.stanford.edu/data/glove.twitter.'\\\n '27B.zip'\n glove_zip_path = glove_folder.joinpath('glove_zip.zip')\n # Reference:\n # http://docs.python-requests.org/en/master/user/quickstart/#raw-response-content\n with glove_zip_path.open('wb') as glove_zip_file:\n request = requests.get(download_link, stream=True)\n total_size = int(request.headers.get('content-length', 0))\n print('Downloading Glove Twitter vectors')\n for chunk in tqdm(request.iter_content(chunk_size=128),\n total=math.ceil(total_size//128)):\n glove_zip_file.write(chunk)\n print('Unzipping word vector download')\n glove_zip_path = str(glove_zip_path.resolve())\n with zipfile.ZipFile(glove_zip_path, 'r') as glove_zip_file:\n glove_zip_file.extractall(path=glove_folder)\n else:\n raise Exception('Glove Twitter vectors not downloaded '\n 'therefore cannot load them')\n\n def add_full_path(file_name):\n file_path = glove_folder.joinpath(file_name)\n return self.glove_txt_binary(file_path)\n\n return {25: add_full_path('glove.twitter.27B.25d.txt'),\n 50: add_full_path('glove.twitter.27B.50d.txt'),\n 100: add_full_path('glove.twitter.27B.100d.txt'),\n 200: add_full_path('glove.twitter.27B.200d.txt')}\n\n def __init__(self, dimension, name=None, unit_length=False,\n skip_conf=False, padding_value=None,\n filter_words=None):\n '''\n :param dimension: Dimension size of the word vectors you would like to \\\n use. Choice: 25, 50, 100, 200\n :param skip_conf: Whether to skip the permission step for downloading \\\n the word vectors as it requires user input. True to skip permission.\n :type dimension: int\n :type skip_conf: bool. 
Default False\n '''\n\n dimension_file = self.download(skip_conf)\n if not isinstance(dimension, int):\n raise TypeError('Type of dimension has to be int not {}'\n .format(type(dimension)))\n if dimension not in dimension_file:\n raise ValueError('Dimension avliable are the following {}'\n .format(list(dimension_file.keys())))\n if name is None:\n name = f'glove twitter {dimension}d'\n vector_file = dimension_file[dimension]\n print(f'Loading {name} from file')\n glove_key_vectors = KeyedVectors.load_word2vec_format(vector_file,\n binary=True)\n super().__init__(glove_key_vectors, name=name, unit_length=unit_length,\n padding_value=padding_value,\n filter_words=filter_words)\n\n def _unknown_vector(self):\n '''\n This is to be Overridden by sub classes if they want to return a custom\n unknown vector.\n\n :returns: A vector for all unknown words. In this case it is a zero\n vector.\n :rtype: numpy.ndarray\n '''\n\n return np.zeros(self.vector_size, dtype=np.float32)\n\n\nclass GloveCommonCrawl(WordVectors):\n\n def download(self, skip_conf, version):\n '''\n This method checks if either the `Glove Common Crawl \\\n <https://nlp.stanford.edu/projects/glove/>`_ 840 or 42 Billion token\n word vectors were downloaded already into the repoistory if not it\n downloads and unzips the 300 Dimension word vector if permission is\n granted.\n\n :param skip_conf: Whether to skip the permission step as it requires \\\n user input. True to skip permission.\n :param version: Choice of either the 42 or 840 Billion token 300 \\\n dimension common crawl glove vectors. 
The values can be only 42 or \\\n 840 and default is 42.\n :type skip_conf: bool\n :type version: int\n :returns: The filepath to the 300 dimension word vector\n :rtype: String\n '''\n\n glove_folder = BELLA_VEC_DIR.joinpath(f'glove_common_crawl_{version}b')\n glove_folder.mkdir(parents=True, exist_ok=True)\n glove_file_name = f'glove.{version}B.300d'\n glove_txt_fp = glove_folder.joinpath(glove_file_name + '.txt')\n glove_binary_fp = glove_folder.joinpath(glove_file_name + '.binary')\n # If the files in the folder aren't all the glove files that would be\n # downloaded re-download the zip and unzip the files.\n if not glove_binary_fp.is_file() and not glove_txt_fp.is_file():\n can_download = 'yes'\n if not skip_conf:\n download_msg = 'We are going to download the glove vectors '\\\n 'this is a large download of ~2GB and takes '\\\n '~5.6GB of diskspace after being unzipped. '\\\n 'Would you like to continue? If so type '\\\n '`yes`\\n>> '\n can_download = input(download_msg)\n\n if can_download.strip().lower() == 'yes':\n zip_file_name = f'glove.{version}B.300d.zip'\n download_link = 'http://nlp.stanford.edu/data/' + zip_file_name\n\n glove_zip_path = glove_folder.joinpath(zip_file_name)\n\n # Reference:\n # http://docs.python-requests.org/en/master/user/quickstart/#raw-response-content\n with glove_zip_path.open('wb') as glove_zip_file:\n request = requests.get(download_link, stream=True)\n total_size = int(request.headers.get('content-length', 0))\n print(f'Downloading Glove {version}B vectors')\n for chunk in tqdm(request.iter_content(chunk_size=128),\n total=math.ceil(total_size//128)):\n glove_zip_file.write(chunk)\n print('Unzipping word vector download')\n glove_zip_path = str(glove_zip_path.resolve())\n with zipfile.ZipFile(glove_zip_path, 'r') as glove_zip_file:\n glove_zip_file.extractall(path=glove_folder)\n else:\n raise Exception(f'Glove Common Crawl {version}b vectors '\n 'not downloaded therefore cannot load them')\n glove_folder_files = 
list(glove_folder.iterdir())\n if not glove_txt_fp.is_file():\n raise Exception('Error in either downloading the glove vectors'\n ' or file path names. Files in the glove '\n f'folder {glove_folder_files} and where the '\n f'golve file should be {str(glove_txt_fp)}')\n return self.glove_txt_binary(glove_txt_fp)\n\n def __init__(self, version=42, name=None, unit_length=False,\n skip_conf=False, padding_value=None,\n filter_words=None):\n '''\n :param version: Choice of either the 42 or 840 Billion token 300 \\\n dimension common crawl glove vectors. The values can be only 42 or \\\n 840 and default is 42.\n :param skip_conf: Whether to skip the permission step for downloading \\\n the word vectors as it requires user input. True to skip permission.\n :type version: int. Default 42.\n :type skip_conf: bool. Default False\n '''\n if version not in [42, 840]:\n raise ValueError('Common Crawl only come in two version the 840 '\n 'or 42 Billion tokens. Require to choose between '\n f'42 and 840 and not {version}')\n\n if name is None:\n name = 'glove 300d {}b common crawl'.format(version)\n vector_file = self.download(skip_conf, version)\n print(f'Loading {name} from file')\n glove_key_vectors = KeyedVectors.load_word2vec_format(vector_file,\n binary=True)\n super().__init__(glove_key_vectors, name=name, unit_length=unit_length,\n padding_value=padding_value,\n filter_words=filter_words)\n\n def _unknown_vector(self):\n '''\n This is to be Overridden by sub classes if they want to return a custom\n unknown vector.\n\n :returns: A vector for all unknown words. 
In this case it is a zero\n vector.\n :rtype: numpy.ndarray\n '''\n\n return np.zeros(self.vector_size, dtype=np.float32)\n\n\nclass GloveWikiGiga(WordVectors):\n\n def download(self, skip_conf):\n '''\n This method checks if the\n `Glove Wikipedia Gigaword word vectors\n <https://nlp.stanford.edu/projects/glove/>`_\n are already in the repoistory if not it downloads and unzips the word\n vectors if permission is granted and converts them into a gensim\n KeyedVectors binary representation.\n\n :param skip_conf: Whether to skip the permission step as it requires \\\n user input. True to skip permission.\n :type skip_conf: bool\n :returns: A dict containing word vector dimension as keys and the \\\n absolute path to the vector file.\n :rtype: dict\n '''\n\n glove_folder = BELLA_VEC_DIR.joinpath(f'glove_wiki_giga')\n glove_folder.mkdir(parents=True, exist_ok=True)\n current_glove_files = set([glove_file.name for glove_file\n in glove_folder.iterdir()])\n all_glove_files = set(['glove.6B.50d.binary',\n 'glove.6B.100d.binary',\n 'glove.6B.200d.binary',\n 'glove.6B.300d.binary'])\n interset = all_glove_files.intersection(current_glove_files)\n # If the files in the folder aren't all the glove files that would be\n # downloaded re-download the zip and unzip the files.\n if interset != all_glove_files:\n can_download = 'yes'\n if not skip_conf:\n download_msg = 'We are going to download the glove vectors '\\\n 'this is a large download of ~900MB and takes '\\\n '~2.1GB of disk space after being unzipped. '\\\n 'Would you like to continue? 
If so type '\\\n '`yes`\\n>> '\n can_download = input(download_msg)\n\n if can_download.strip().lower() == 'yes':\n download_link = 'http://nlp.stanford.edu/data/glove.6B.zip'\n glove_zip_path = glove_folder.joinpath('glove_zip.zip')\n # Reference:\n # http://docs.python-requests.org/en/master/user/quickstart/#raw-response-content\n with glove_zip_path.open('wb') as glove_zip_file:\n request = requests.get(download_link, stream=True)\n total_size = int(request.headers.get('content-length', 0))\n print('Downloading Glove Wikipedia Gigaword vectors')\n for chunk in tqdm(request.iter_content(chunk_size=128),\n total=math.ceil(total_size//128)):\n glove_zip_file.write(chunk)\n print('Unzipping word vector download')\n glove_zip_path = str(glove_zip_path.resolve())\n with zipfile.ZipFile(glove_zip_path, 'r') as glove_zip_file:\n glove_zip_file.extractall(path=glove_folder)\n else:\n raise Exception('Glove Twitter vectors not downloaded '\n 'therefore cannot load them')\n\n def add_full_path(file_name):\n file_path = glove_folder.joinpath(file_name)\n return self.glove_txt_binary(file_path)\n\n return {50: add_full_path('glove.6B.50d.txt'),\n 100: add_full_path('glove.6B.100d.txt'),\n 200: add_full_path('glove.6B.200d.txt'),\n 300: add_full_path('glove.6B.300d.txt')}\n\n def __init__(self, dimension, name=None, unit_length=False, skip_conf=False,\n padding_value=None, filter_words=None):\n '''\n :param dimension: Dimension size of the word vectors you would like to \\\n use. Choice: 50, 100, 200, 300\n :param skip_conf: Whether to skip the permission step for downloading \\\n the word vectors as it requires user input. True to skip permission.\n :type dimension: int\n :type skip_conf: bool. 
Default False\n '''\n\n dimension_file = self.download(skip_conf)\n if not isinstance(dimension, int):\n raise TypeError('Type of dimension has to be int not {}'\\\n .format(type(dimension)))\n if dimension not in dimension_file:\n raise ValueError('Dimension avliable are the following {}'\\\n .format(list(dimension_file.keys())))\n if name is None:\n name = 'glove wiki giga {}d'.format(dimension)\n vector_file = dimension_file[dimension]\n print(f'Loading {name} from file')\n glove_key_vectors = KeyedVectors.load_word2vec_format(vector_file,\n binary=True)\n super().__init__(glove_key_vectors, name=name, unit_length=unit_length,\n padding_value=padding_value,\n filter_words=filter_words)\n\n def _unknown_vector(self):\n '''\n This is to be Overridden by sub classes if they want to return a custom\n unknown vector.\n\n :returns: A vector for all unknown words. In this case it is a zero\n vector.\n :rtype: numpy.ndarray\n '''\n\n return np.zeros(self.vector_size, dtype=np.float32)\n" ]
[ [ "numpy.asarray", "numpy.zeros", "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
physycom/minimocas-tools
[ "bf1ad6f5f5f95942cc99d99e8b1a543c68f3a646" ]
[ "python/heatmap-server.py" ]
[ "#! /usr/bin/env python3\n\nfrom http.server import BaseHTTPRequestHandler\nimport os\nimport json\nimport re\nfrom matplotlib import cm\nfrom datetime import datetime\nimport pandas as pd\nimport collections\nimport numpy as np\n\ndatafile = ''\ngrid = {}\ndata = collections.defaultdict(dict)\n\nclass Server(BaseHTTPRequestHandler):\n\n def do_HEAD(self):\n return\n\n def do_POST(self):\n return\n\n def do_GET(self):\n self.respond()\n\n def serve_html(self, filename):\n f = open(os.path.dirname(os.path.realpath(__file__)) + '/html/' + filename)\n status = 200\n content_type = 'text/html; charset=ISO-8859-1'\n response_content = f.read()\n response_content = bytes(response_content, 'UTF-8')\n size = len(response_content)\n return status, content_type, response_content, size\n\n def serve_404(self):\n status = 404\n content_type = 'text/plain'\n response_content = '404 Url not found.'\n response_content = bytes(response_content, 'UTF-8')\n size = len(response_content)\n return status, content_type, response_content, size\n\n def serve_json(self, geojson):\n status = 200\n content_type = 'application/json; charset=ISO-8859-1'\n response_content = json.dumps(geojson)\n response_content = bytes(response_content, 'UTF-8')\n size = len(response_content)\n return status, content_type, response_content, size\n\n def handle_http(self):\n if self.path.endswith('.html'):\n try:\n htmlfile = self.path.split('/')[-1]\n status, content_type, response_content, size = self.serve_html(htmlfile)\n except:\n status, content_type, response_content, size = self.serve_404()\n elif self.path == '/':\n status, content_type, response_content, size = self.serve_html('index.html')\n elif self.path.startswith('/heat'):\n status, content_type, response_content, size = self.serve_html('heatmap.html')\n elif self.path.startswith('/json'):\n geojson = {\n 'type': 'FeatureCollection',\n 'features': [],\n }\n\n # sanity checks and various init\n geojson['times'] = list(map(lambda x: 
datetime.fromtimestamp(x).strftime(\"%Y%m%d_%H%M%S\"), data[0].keys()))\n\n for k, v in data.items():\n feat = {\n 'type': 'Feature',\n 'properties': {\n 'time_cnt' : []\n },\n 'geometry': {\n 'type': 'Point',\n 'coordinates': []\n }\n }\n feat['properties']['time_cnt'] = list(map(int, v.values()))\n feat['geometry']['coordinates'] = list(grid[k])\n geojson['features'].append(feat)\n\n# print(geojson)\n\n status, content_type, response_content, size = self.serve_json(geojson)\n else:\n status, content_type, response_content, size = self.serve_404()\n\n self.send_response(status)\n self.send_header('Content-type', content_type)\n self.send_header('Content-length', size)\n self.end_headers()\n return response_content\n\n def respond(self):\n content = self.handle_http()\n self.wfile.write(content)\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--server-address', help='http server address', default='localhost')\n parser.add_argument('-p', '--server-port', help='http server port', default=9999, type=int)\n parser.add_argument('-g', '--grid', help='grid geojson file', required=True)\n parser.add_argument('-d', '--data', help='grid data influx-ready file', required=True)\n args = parser.parse_args()\n gridfile = args.grid\n datafile = args.data\n\n with open(gridfile) as f:\n geogrid = json.load(f)\n for i in geogrid['features']:\n grid[i['properties']['id']] = np.mean(i['geometry']['coordinates'][0], 0)\n\n df = pd.read_csv(datafile, sep=' ', index_col=None, header=None)\n df['gid'] = df[0].str.split('=', expand=True)[1]\n df['cnt'] = df[1].str.split('=', expand=True)[1]\n df['ts'] = df[2] * 1e-9\n df = df[['gid','ts','cnt']].astype({'ts':'int', 'gid':'int'})\n for row in df.values:\n data[row[0]][row[1]] = row[2]\n\n import time\n from http.server import HTTPServer\n\n HOST_NAME = args.server_address\n PORT_NUMBER = args.server_port\n\n httpd = HTTPServer((HOST_NAME, PORT_NUMBER), Server)\n 
print(time.asctime(), 'Server UP - %s:%s' % (HOST_NAME, PORT_NUMBER))\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n httpd.server_close()\n print(time.asctime(), 'Server DOWN - %s:%s' % (HOST_NAME, PORT_NUMBER))\n" ]
[ [ "pandas.read_csv", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
zjZSTU/GoogLeNet
[ "a0801e45006d34b4901a8834397961ce17f24e2e" ]
[ "py/models/googlenet.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\n@date: 2020/4/7 下午2:47\n@file: googlenet.py\n@author: zj\n@description: GoogLeNet实现\n\"\"\"\n\nfrom __future__ import division\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.jit.annotations import Optional, Tuple\nfrom torch import Tensor\n\n__all__ = ['GoogLeNet']\n\n\nclass GoogLeNet(nn.Module):\n __constants__ = ['aux_logits', 'transform_input']\n\n def __init__(self, num_classes=1000, aux_logits=True, transform_input=False, init_weights=True,\n blocks=None):\n \"\"\"\n GoogLeNet实现\n :param num_classes: 输出类别数\n :param aux_logits: 是否使用辅助分类器\n :param transform_input:\n :param init_weights:\n :param blocks:\n \"\"\"\n super(GoogLeNet, self).__init__()\n if blocks is None:\n blocks = [BasicConv2d, Inception, InceptionAux]\n assert len(blocks) == 3\n conv_block = blocks[0]\n inception_block = blocks[1]\n inception_aux_block = blocks[2]\n\n self.aux_logits = aux_logits\n self.transform_input = transform_input\n\n self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)\n self.maxpool1 = nn.MaxPool2d(3, stride=2, padding=0, ceil_mode=True)\n self.conv2 = conv_block(64, 64, kernel_size=1, stride=1, padding=0)\n self.conv3 = conv_block(64, 192, kernel_size=3, stride=1, padding=1)\n self.maxpool2 = nn.MaxPool2d(3, stride=2, padding=0, ceil_mode=True)\n\n self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)\n self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)\n self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)\n\n self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)\n self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)\n self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)\n self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)\n self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)\n self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.inception5a = inception_block(832, 
256, 160, 320, 32, 128, 128)\n self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)\n\n if aux_logits:\n # 辅助分类器\n self.aux1 = inception_aux_block(512, num_classes)\n self.aux2 = inception_aux_block(528, num_classes)\n\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.dropout = nn.Dropout(0.2)\n self.fc = nn.Linear(1024, num_classes)\n\n if init_weights:\n self._initialize_weights()\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n import scipy.stats as stats\n X = stats.truncnorm(-2, 2, scale=0.01)\n values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)\n values = values.view(m.weight.size())\n with torch.no_grad():\n m.weight.copy_(values)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _transform_input(self, x):\n # type: (Tensor) -> Tensor\n if self.transform_input:\n x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n x = torch.cat((x_ch0, x_ch1, x_ch2), 1)\n return x\n\n def _forward(self, x):\n # type: (Tensor) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]\n # N x 3 x 224 x 224\n x = self.conv1(x)\n # N x 64 x 112 x 112\n x = self.maxpool1(x)\n # N x 64 x 56 x 56\n x = self.conv2(x)\n # N x 64 x 56 x 56\n x = self.conv3(x)\n # N x 192 x 56 x 56\n x = self.maxpool2(x)\n\n # N x 192 x 28 x 28\n x = self.inception3a(x)\n # N x 256 x 28 x 28\n x = self.inception3b(x)\n # N x 480 x 28 x 28\n x = self.maxpool3(x)\n # N x 480 x 14 x 14\n x = self.inception4a(x)\n # N x 512 x 14 x 14\n aux_defined = self.training and self.aux_logits\n if aux_defined:\n aux1 = self.aux1(x)\n else:\n aux1 = None\n\n x = self.inception4b(x)\n # N x 512 x 14 x 14\n x = self.inception4c(x)\n # N x 512 x 14 x 14\n x = self.inception4d(x)\n 
# N x 528 x 14 x 14\n if aux_defined:\n aux2 = self.aux2(x)\n else:\n aux2 = None\n\n x = self.inception4e(x)\n # N x 832 x 14 x 14\n x = self.maxpool4(x)\n # N x 832 x 7 x 7\n x = self.inception5a(x)\n # N x 832 x 7 x 7\n x = self.inception5b(x)\n # N x 1024 x 7 x 7\n\n x = self.avgpool(x)\n # N x 1024 x 1 x 1\n x = torch.flatten(x, 1)\n # N x 1024\n x = self.dropout(x)\n x = self.fc(x)\n # N x 1000 (num_classes)\n return x, aux2, aux1\n\n def forward(self, x):\n x = self._transform_input(x)\n x, aux1, aux2 = self._forward(x)\n aux_defined = self.training and self.aux_logits\n if aux_defined:\n # 训练阶段返回3个分类器结果\n return x, aux2, aux1\n else:\n # 测试阶段仅使用最后一个分类器\n return x\n\n\nclass Inception(nn.Module):\n __constants__ = ['branch2', 'branch3', 'branch4']\n\n def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj,\n conv_block=None):\n super(Inception, self).__init__()\n if conv_block is None:\n conv_block = BasicConv2d\n self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1, stride=1, padding=0)\n\n self.branch2 = nn.Sequential(\n conv_block(in_channels, ch3x3red, kernel_size=1, stride=1, padding=0),\n conv_block(ch3x3red, ch3x3, kernel_size=3, stride=1, padding=1)\n )\n\n self.branch3 = nn.Sequential(\n conv_block(in_channels, ch5x5red, kernel_size=1, stride=1, padding=0),\n conv_block(ch5x5red, ch5x5, kernel_size=5, stride=1, padding=2)\n )\n\n self.branch4 = nn.Sequential(\n nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),\n conv_block(in_channels, pool_proj, kernel_size=1, stride=1, padding=0)\n )\n\n def _forward(self, x):\n branch1 = self.branch1(x)\n branch2 = self.branch2(x)\n branch3 = self.branch3(x)\n branch4 = self.branch4(x)\n\n outputs = [branch1, branch2, branch3, branch4]\n return outputs\n\n def forward(self, x):\n outputs = self._forward(x)\n return torch.cat(outputs, 1)\n\n\nclass InceptionAux(nn.Module):\n\n def __init__(self, in_channels, num_classes, conv_block=None):\n super(InceptionAux, 
self).__init__()\n if conv_block is None:\n conv_block = BasicConv2d\n self.conv = conv_block(in_channels, 128, kernel_size=1, stride=1, padding=0)\n\n self.fc1 = nn.Linear(2048, 1024)\n self.fc2 = nn.Linear(1024, num_classes)\n\n def forward(self, x):\n # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14\n x = F.adaptive_avg_pool2d(x, (4, 4))\n # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4\n x = self.conv(x)\n # N x 128 x 4 x 4\n x = torch.flatten(x, 1)\n # N x 2048\n x = F.relu(self.fc1(x), inplace=True)\n # N x 1024\n x = F.dropout(x, 0.7, training=self.training)\n # N x 1024\n x = self.fc2(x)\n # N x 1000 (num_classes)\n\n return x\n\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_channels, out_channels, **kwargs):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n # self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x):\n x = self.conv(x)\n # x = self.bn(x)\n return F.relu(x, inplace=True)\n" ]
[ [ "torch.nn.Dropout", "torch.cat", "torch.nn.functional.dropout", "torch.nn.init.constant_", "scipy.stats.truncnorm", "torch.nn.Conv2d", "torch.unsqueeze", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.functional.relu", "torch.nn.AdaptiveAvgPool2d", "torch.no_grad", "torch.flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zarzarj/MinkowskiEngine
[ "1c1c09d23bd2147fa41cae25fa8837290c2bd07b" ]
[ "examples/PointTransformer/PointTransformer.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom examples.PointTransformer.pointops.functions import pointops\n\n\nclass PointTransformerLayer(nn.Module):\n def __init__(self, in_planes, out_planes, share_planes=8, nsample=16):\n super().__init__()\n self.mid_planes = mid_planes = out_planes // 1\n self.out_planes = out_planes\n self.share_planes = share_planes\n self.nsample = nsample\n self.linear_q = nn.Linear(in_planes, mid_planes)\n self.linear_k = nn.Linear(in_planes, mid_planes)\n self.linear_v = nn.Linear(in_planes, out_planes)\n self.linear_p = nn.Sequential(nn.Linear(3, 3), nn.BatchNorm1d(3), nn.ReLU(inplace=True), nn.Linear(3, out_planes))\n self.linear_w = nn.Sequential(nn.BatchNorm1d(mid_planes), nn.ReLU(inplace=True),\n nn.Linear(mid_planes, mid_planes // share_planes),\n nn.BatchNorm1d(mid_planes // share_planes), nn.ReLU(inplace=True),\n nn.Linear(out_planes // share_planes, out_planes // share_planes))\n self.softmax = nn.Softmax(dim=1)\n \n def forward(self, pxo) -> torch.Tensor:\n p, x, o = pxo # (n, 3), (n, c), (b)\n x_q, x_k, x_v = self.linear_q(x), self.linear_k(x), self.linear_v(x) # (n, c)\n x_k = pointops.queryandgroup(self.nsample, p, p, x_k, None, o, o, use_xyz=True) # (n, nsample, 3+c)\n x_v = pointops.queryandgroup(self.nsample, p, p, x_v, None, o, o, use_xyz=False) # (n, nsample, c)\n p_r, x_k = x_k[:, :, 0:3], x_k[:, :, 3:]\n for i, layer in enumerate(self.linear_p): p_r = layer(p_r.transpose(1, 2).contiguous()).transpose(1, 2).contiguous() if i == 1 else layer(p_r) # (n, nsample, c)\n w = x_k - x_q.unsqueeze(1) + p_r.view(p_r.shape[0], p_r.shape[1], self.out_planes // self.mid_planes, self.mid_planes).sum(2) # (n, nsample, c)\n for i, layer in enumerate(self.linear_w): w = layer(w.transpose(1, 2).contiguous()).transpose(1, 2).contiguous() if i % 3 == 0 else layer(w)\n w = self.softmax(w) # (n, nsample, c)\n n, nsample, c = x_v.shape; s = self.share_planes\n x = ((x_v + p_r).view(n, nsample, s, c // s) * w.unsqueeze(2)).sum(1).view(n, 
c)\n return x\n\n\nclass TransitionDown(nn.Module):\n def __init__(self, in_planes, out_planes, stride=1, nsample=16):\n super().__init__()\n self.stride, self.nsample = stride, nsample\n if stride != 1:\n self.linear = nn.Linear(3+in_planes, out_planes, bias=False)\n self.pool = nn.MaxPool1d(nsample)\n else:\n self.linear = nn.Linear(in_planes, out_planes, bias=False)\n self.bn = nn.BatchNorm1d(out_planes)\n self.relu = nn.ReLU(inplace=True)\n \n def forward(self, pxo):\n p, x, o = pxo # (n, 3), (n, c), (b)\n if self.stride != 1:\n n_o, count = [o[0].item() // self.stride], o[0].item() // self.stride\n for i in range(1, o.shape[0]):\n count += (o[i].item() - o[i-1].item()) // self.stride\n n_o.append(count)\n n_o = torch.cuda.IntTensor(n_o)\n idx = pointops.furthestsampling(p, o, n_o) # (m)\n n_p = p[idx.long(), :] # (m, 3)\n x = pointops.queryandgroup(self.nsample, p, n_p, x, None, o, n_o, use_xyz=True) # (m, 3+c, nsample)\n x = self.relu(self.bn(self.linear(x).transpose(1, 2).contiguous())) # (m, c, nsample)\n x = self.pool(x).squeeze(-1) # (m, c)\n p, o = n_p, n_o\n else:\n x = self.relu(self.bn(self.linear(x))) # (n, c)\n return [p, x, o]\n\n\nclass TransitionUp(nn.Module):\n def __init__(self, in_planes, out_planes=None):\n super().__init__()\n if out_planes is None:\n self.linear1 = nn.Sequential(nn.Linear(2*in_planes, in_planes), nn.BatchNorm1d(in_planes), nn.ReLU(inplace=True))\n self.linear2 = nn.Sequential(nn.Linear(in_planes, in_planes), nn.ReLU(inplace=True))\n else:\n self.linear1 = nn.Sequential(nn.Linear(out_planes, out_planes), nn.BatchNorm1d(out_planes), nn.ReLU(inplace=True))\n self.linear2 = nn.Sequential(nn.Linear(in_planes, out_planes), nn.BatchNorm1d(out_planes), nn.ReLU(inplace=True))\n \n def forward(self, pxo1, pxo2=None):\n if pxo2 is None:\n _, x, o = pxo1 # (n, 3), (n, c), (b)\n x_tmp = []\n for i in range(o.shape[0]):\n if i == 0:\n s_i, e_i, cnt = 0, o[0], o[0]\n else:\n s_i, e_i, cnt = o[i-1], o[i], o[i] - o[i-1]\n x_b = x[s_i:e_i, 
:]\n x_b = torch.cat((x_b, self.linear2(x_b.sum(0, True) / cnt).repeat(cnt, 1)), 1)\n x_tmp.append(x_b)\n x = torch.cat(x_tmp, 0)\n x = self.linear1(x)\n else:\n p1, x1, o1 = pxo1; p2, x2, o2 = pxo2\n x = self.linear1(x1) + pointops.interpolation(p2, p1, self.linear2(x2), o2, o1)\n return x\n\n\nclass PointTransformerBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, share_planes=8, nsample=16):\n super(PointTransformerBlock, self).__init__()\n self.linear1 = nn.Linear(in_planes, planes, bias=False)\n self.bn1 = nn.BatchNorm1d(planes)\n self.transformer2 = PointTransformerLayer(planes, planes, share_planes, nsample)\n self.bn2 = nn.BatchNorm1d(planes)\n self.linear3 = nn.Linear(planes, planes * self.expansion, bias=False)\n self.bn3 = nn.BatchNorm1d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, pxo):\n p, x, o = pxo # (n, 3), (n, c), (b)\n identity = x\n x = self.relu(self.bn1(self.linear1(x)))\n x = self.relu(self.bn2(self.transformer2([p, x, o])))\n x = self.bn3(self.linear3(x))\n x += identity\n x = self.relu(x)\n return [p, x, o]\n\n\nclass PointTransformerSeg(nn.Module):\n def __init__(self, block=PointTransformerBlock, blocks=[2, 3, 4, 6, 3], in_channels=3, out_channels=20, **kwargs):\n super().__init__()\n for name, value in kwargs.items():\n if name != \"self\":\n try:\n setattr(self, name, value)\n except:\n print(name, value)\n self.c = in_channels\n self.in_planes, planes = in_channels, [32, 64, 128, 256, 512]\n fpn_planes, fpnhead_planes, share_planes = 128, 64, 8\n stride, nsample = [1, 4, 4, 4, 4], [8, 16, 16, 16, 16]\n self.enc1 = self._make_enc(block, planes[0], blocks[0], share_planes, stride=stride[0], nsample=nsample[0]) # N/1\n self.enc2 = self._make_enc(block, planes[1], blocks[1], share_planes, stride=stride[1], nsample=nsample[1]) # N/4\n self.enc3 = self._make_enc(block, planes[2], blocks[2], share_planes, stride=stride[2], nsample=nsample[2]) # N/16\n self.enc4 = 
self._make_enc(block, planes[3], blocks[3], share_planes, stride=stride[3], nsample=nsample[3]) # N/64\n self.enc5 = self._make_enc(block, planes[4], blocks[4], share_planes, stride=stride[4], nsample=nsample[4]) # N/256\n self.dec5 = self._make_dec(block, planes[4], 2, share_planes, nsample=nsample[4], is_head=True) # transform p5\n self.dec4 = self._make_dec(block, planes[3], 2, share_planes, nsample=nsample[3]) # fusion p5 and p4\n self.dec3 = self._make_dec(block, planes[2], 2, share_planes, nsample=nsample[2]) # fusion p4 and p3\n self.dec2 = self._make_dec(block, planes[1], 2, share_planes, nsample=nsample[1]) # fusion p3 and p2\n self.dec1 = self._make_dec(block, planes[0], 2, share_planes, nsample=nsample[0]) # fusion p2 and p1\n self.cls = nn.Sequential(nn.Linear(planes[0], planes[0]), nn.BatchNorm1d(planes[0]), nn.ReLU(inplace=True), nn.Linear(planes[0], out_channels))\n\n def _make_enc(self, block, planes, blocks, share_planes=8, stride=1, nsample=16):\n layers = []\n layers.append(TransitionDown(self.in_planes, planes * block.expansion, stride, nsample))\n self.in_planes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.in_planes, self.in_planes, share_planes, nsample=nsample))\n return nn.Sequential(*layers)\n\n def _make_dec(self, block, planes, blocks, share_planes=8, nsample=16, is_head=False):\n layers = []\n layers.append(TransitionUp(self.in_planes, None if is_head else planes * block.expansion))\n self.in_planes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.in_planes, self.in_planes, share_planes, nsample=nsample))\n return nn.Sequential(*layers)\n\n def forward(self, batch, return_feats=False):\n # p0, x0, o0 = pxo # (n, 3), (n, c), (b)\n p0, x0, o0 = batch['coords'][:,1:].contiguous(), batch['feats'], batch['num_pts'].int()\n o0 = o0.cumsum(dim=0, dtype=torch.int32)\n # print(p0, p0.shape, x0, x0.shape, o0.shape, o0)\n # x0 = p0 if self.c == 3 else torch.cat((p0, x0), 1)\n 
p1, x1, o1 = self.enc1([p0, x0, o0])\n p2, x2, o2 = self.enc2([p1, x1, o1])\n p3, x3, o3 = self.enc3([p2, x2, o2])\n p4, x4, o4 = self.enc4([p3, x3, o3])\n p5, x5, o5 = self.enc5([p4, x4, o4])\n x5 = self.dec5[1:]([p5, self.dec5[0]([p5, x5, o5]), o5])[1]\n x4 = self.dec4[1:]([p4, self.dec4[0]([p4, x4, o4], [p5, x5, o5]), o4])[1]\n x3 = self.dec3[1:]([p3, self.dec3[0]([p3, x3, o3], [p4, x4, o4]), o3])[1]\n x2 = self.dec2[1:]([p2, self.dec2[0]([p2, x2, o2], [p3, x3, o3]), o2])[1]\n x1 = self.dec1[1:]([p1, self.dec1[0]([p1, x1, o1], [p2, x2, o2]), o1])[1]\n x = self.cls(x1)\n if return_feats:\n return x, x1\n return x\n\n @staticmethod\n def add_argparse_args(parent_parser):\n parser = parent_parser.add_argument_group(\"PointTransformer\")\n # parser.add_argument('--block', type=int, default=2e4)\n # parser.add_argument('--blocks', type=int, default=2e4)\n return parent_parser\n\n def convert_sync_batchnorm(self):\n pass\n\n\n# def pointtransformer_seg_repro(**kwargs):\n# model = PointTransformerSeg(PointTransformerBlock, [2, 3, 4, 6, 3], **kwargs)\n# return model" ]
[ [ "torch.nn.Softmax", "torch.nn.BatchNorm1d", "torch.nn.Sequential", "torch.cat", "torch.cuda.IntTensor", "torch.nn.Linear", "torch.nn.MaxPool1d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neonsecret/Real-Time-Voice-Cloning
[ "07442ca1e73957440b3c8c55b50b124fb813db55" ]
[ "synthesizer/g2p/__init__.py" ]
[ "import re\n\nimport torch\nimport threading\nimport _thread\n\nfrom alphabet_detector import AlphabetDetector\nfrom contextlib import contextmanager\n\nfrom .config import DataConfigEn, DataConfigRu, ModelConfigEn, ModelConfigRu, TestConfigEn, TestConfigRu\nfrom .data import PersianLexicon\nfrom .model import Encoder, Decoder\n\n\ndef load_model(model_path, model, lang):\n model.load_state_dict(torch.load(\n model_path,\n map_location=lambda storage,\n loc: storage\n ))\n model.to(TestConfigEn.device if lang == \"en\" else TestConfigRu.device)\n model.eval()\n return model\n\n\nclass G2P(object):\n def __init__(self, lang):\n # data\n self.DataConfig = DataConfigEn if lang == \"en\" else DataConfigRu\n self.ModelConfig = ModelConfigEn if lang == \"en\" else ModelConfigRu\n self.ds = PersianLexicon(\n self.DataConfig.graphemes_path,\n self.DataConfig.phonemes_path,\n self.DataConfig.lexicon_path\n )\n\n # model\n self.encoder_model = Encoder(\n self.ModelConfig.graphemes_size,\n self.ModelConfig.hidden_size\n ).to(TestConfigEn.device if lang == \"en\" else TestConfigRu.device)\n load_model(TestConfigEn.encoder_model_path if lang == \"en\" else TestConfigRu.encoder_model_path,\n self.encoder_model, lang)\n\n self.decoder_model = Decoder(\n self.ModelConfig.phonemes_size,\n self.ModelConfig.hidden_size\n ).to(TestConfigEn.device if lang == \"en\" else TestConfigRu.device)\n load_model(TestConfigEn.decoder_model_path if lang == \"en\" else TestConfigRu.decoder_model_path,\n self.decoder_model, lang)\n self.lang = lang\n\n def __call__(self, word):\n x = [0] + [self.ds.g2idx[ch] for ch in word] + [1]\n x = torch.tensor(x).long().unsqueeze(1).to(TestConfigEn.device if self.lang == \"en\" else TestConfigRu.device)\n with torch.no_grad():\n enc = self.encoder_model(x)\n\n phonemes, att_weights = [], []\n x = torch.zeros(1, 1).long().to(TestConfigEn.device if self.lang == \"en\" else TestConfigRu.device)\n hidden = torch.ones(\n 1,\n 1,\n self.ModelConfig.hidden_size\n 
).to(TestConfigEn.device if self.lang == \"en\" else TestConfigRu.device)\n t = 0\n while True:\n with torch.no_grad():\n # print(x.device, enc.device, hidden.device)\n out, hidden, att_weight = self.decoder_model(\n x,\n enc,\n hidden\n )\n\n att_weights.append(att_weight.detach().cpu())\n max_index = out[0, 0].argmax()\n x = max_index.unsqueeze(0).unsqueeze(0)\n t += 1\n\n phonemes.append(self.ds.idx2p[max_index.item()])\n if max_index.item() == 1:\n break\n\n return phonemes\n\n\nru_g2p = G2P(lang=\"ru\")\nen_g2p = G2P(lang=\"en\")\nad = AlphabetDetector()\n\n\ndef normalize_repetitions(word):\n chars = [\"\"]\n for ch in word:\n if chars[-1] != ch:\n chars.append(ch)\n return \"\".join(chars)\n\n\nclass TimeoutException(Exception):\n def __init__(self, msg=''):\n self.msg = msg\n\n\n@contextmanager\ndef time_limit(seconds, msg=''):\n timer = threading.Timer(seconds, lambda: _thread.interrupt_main())\n timer.start()\n try:\n yield\n except KeyboardInterrupt:\n raise TimeoutException(\"Timed out for operation {}\".format(msg))\n finally:\n # if the action ends in specified time, timer is canceled\n timer.cancel()\n\n\ndef g2p_all(word, dl_logger):\n if ad.is_latin(word):\n ourg2p = en_g2p\n word = word.upper()\n word = normalize_repetitions(word) # because of some words like чшшшшш\n else: # elif ad.is_cyrillic(word):\n ourg2p = ru_g2p\n try:\n with time_limit(2):\n res = ourg2p(word)\n except TimeoutException:\n dl_logger.log(\"WARNING\", data={\n \"timed out\": word\n })\n res = ourg2p(word[:3]) # will do for some noises\n except Exception: #\n syms = \"абвгдеёжзийклмнопрстуфхцчшщъыьэюя!.,:-\" if ourg2p == ru_g2p else \"abcdefghijklmnopqrstuvwxyz!.,:-\"\n res = ourg2p(\"\".join(s if s in syms else \"\" for s in word))\n dl_logger.log(\"WARNING\", data={\n \"fixed the word\": word\n })\n return res\n\n\ndef s2ids(sentence, dl_logger):\n words = [\"\".join(s if s in \"абвгдеёжзийклмнопрстуфхцчшщъыьэюяabcdefghijklmnopqrstuvwxyz!.,:-\" else \"\" for s in word)\n 
for word in re.split(regexPattern, sentence)]\n return [g2p_all(word, dl_logger) if word not in delims else word for word in words]\n\n\nclass ShortLogger:\n def __init__(self):\n pass\n\n def log(self, *args, **kwargs):\n print(args, kwargs)\n\n\ninited = False\n\n\ndef g2p_main(sentence):\n if not inited:\n init()\n ids = s2ids(sentence, dl_logger)\n return [item for sublist in ids for item in sublist]\n\n\ndef init(dl_logger_=None):\n global dl_logger, inited, delims, regexPattern\n delims = [\",\", \".\", \" \", \"!\", \":\", \"-\"]\n regexPattern = '|'.join('(?={})'.format(re.escape(delim)) for delim in delims)\n if dl_logger_ is None:\n try:\n from synthesizer.models.tacotron_tweaked.train import dl_logger\n except Exception as e:\n print(e)\n dl_logger = ShortLogger()\n else:\n dl_logger = dl_logger_\n inited = True\n\n" ]
[ [ "torch.ones", "torch.zeros", "torch.load", "torch.tensor", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kiwikuma/modred
[ "989ac1881fbd8e24e57ca6dd26ee08432e874754" ]
[ "examples/hermite.py" ]
[ "\"\"\"\nSpectral differentiation from J.A.C. Weideman and S.C. Reddy 1998, ACM TOMS.\n\"\"\"\n\nimport numpy as N\nimport numpy.ma as ma\n\ndef herroots(n):\n \"\"\"Returns the roots of the Hermite polynomial of degree n.\"\"\"\n # Jacobi matrix\n J = N.diag(N.arange(1, n)**0.5, 1) + N.diag(N.arange(1, n)**0.5, -1)\n return N.sort(N.linalg.eigvalsh(J)) / (2**0.5)\n \n\ndef herdif(n, m, b):\n \"\"\"Computes differentiation matrices D1, D2, ..., Dm on Hermite points.\n \n Args:\n n: Number of points, which is also the order of accuracy.\n m: Number of derivative matrices to return.\n b: Scaling parameter. Real and positive.\n \n Returns:\n x: Array of nodes, zeros of Hermite polynomial of degree n, scaled by b.\n Dm: A list s.t. Dm[i] is the (i+1)-th derivative matrix, i=0...m-1.\n \n Note: 0 < m < n-1.\n \"\"\"\n x = herroots(n)\n # Compute weights\n alpha = N.exp(-x**2 / 2.)\n # Set up beta matrix s.t. beta[i,j] = \n # ( (i+1)-th derivative of alpha(x) )/alpha(x), evaluated at x = x(j).\n beta = N.zeros((m+1, x.shape[0]))\n beta[0] = 1.0\n beta[1] = -x\n for i in range(2, m+1):\n beta[i] = -x * beta[i-1] - (i-1) * beta[i-2]\n # Remove initializing row from beta\n beta = N.delete(beta, 0, 0)\n # Compute differentiation matrix (b=1).\n Dm = poldif(x, alpha=alpha, B=beta)\n # Scale nodes by the factor b.\n x = x/b\n # Adjust derivatives for b not equal to 1.\n for i in range(1, m+1):\n Dm[i-1] *= b**i\n \n return x, Dm\n\n\ndef poldif(x, m=None, alpha=None, B=None):\n \"\"\"\n Computes the differentiation matrices D1, D2, ..., Dm on arbitrary nodes.\n \n The function is called with either keyword argument m OR \n keyword args alpha and B.\n If m is given, then the weight function is constant.\n If alpha and B are given, then the weights are defined by alpha and B.\n \n Args:\n x: 1D array of n distinct nodes.\n \n Kwargs:\n m: Number of derivatives.\n alpha: 1D array of weight values alpha[x], evaluated at x = x[k].\n B: Array of size m x n where B[i,j] = beta[i,j] 
= ((i+1)-th derivative\n of alpha(x))/alpha(x), evaluated at x = x[j].\n \n Returns:\n Dm: A list s.t. Dm[i] is the (i+1)-th derivative matrix, i=0...m-1.\n \n Note: 0 < m < n-1.\n \"\"\"\n x = x.flatten()\n n = x.shape[0]\n if m is not None and B is None and alpha is None: \n alpha = N.ones(n)\n B = N.zeros((m, n))\n elif m is None and B is not None and alpha is not None:\n alpha = alpha.flatten()\n m = B.shape[0]\n else:\n raise RuntimeError('Keyword args to poldif are inconsistent.')\n \n XX = N.tile(x, (n, 1)).transpose()\n # DX contains entries x[k] - x[j].\n DX = XX - XX.transpose()\n # Put 1's one the main diagonal.\n N.fill_diagonal(DX, 1.)\n \n # C has entries c[k]/c[j].\n c = alpha * N.prod(DX, 1)\n C = N.tile(c, (n, 1)).transpose()\n C = C/C.transpose()\n \n # Z has entries 1/(x[k]-x[j])\n Z = 1./DX\n N.fill_diagonal(Z, 0.)\n \n # X is Z' but with the diagonal entries removed.\n X = Z.transpose()\n X = ma.array(X.transpose(), mask=N.identity(n)).compressed().\\\n reshape((n, n-1)).transpose()\n \n # Y is matrix of cumulative sums and D is a differentiation matrix.\n Y = N.ones((n, n))\n D = N.eye(n)\n Dm = []\n for i in range(1, m+1):\n # Diagonals\n Y = N.cumsum(N.concatenate((B[i-1].reshape((1,n)), \n i * Y[0:n-1] * X), axis=0), axis=0)\n # Off-diagonals\n D = i * Z * (C * N.tile(N.diag(D), (n,1)).transpose() - D)\n # Correct the diagonal\n D.flat[::n+1] = Y[-1]\n Dm.append(D)\n \n return Dm\n\n" ]
[ [ "numpy.diag", "numpy.arange", "numpy.eye", "numpy.tile", "numpy.ones", "numpy.delete", "numpy.identity", "numpy.fill_diagonal", "numpy.prod", "numpy.linalg.eigvalsh", "numpy.exp", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WLM1ke/PortfolioOptimizer
[ "477430951984e9018143ce09b01ea96aa490ea40" ]
[ "src/local/moex/tests/test_index.py" ]
[ "import arrow\nimport pandas as pd\n\nfrom local.moex import iss_index\nfrom local.moex.iss_index import IndexDataManager\nfrom web.labels import CLOSE_PRICE\n\n\ndef test_index():\n df = iss_index.index()\n assert isinstance(df, pd.Series)\n assert df.name == CLOSE_PRICE\n assert df.index.is_monotonic_increasing\n assert df.index.is_unique\n assert df.index[0] == pd.to_datetime('2003-02-26')\n assert df.shape[0] > 100\n assert df.loc['2018-03-16'] == 3281.58\n\n\ndef test_download_all():\n manager = IndexDataManager()\n df = manager.value\n assert df.equals(manager.download_all())\n\n\ndef test_update():\n manager = IndexDataManager()\n df = manager.value\n time0 = arrow.now()\n assert manager.last_update < time0\n manager.update()\n assert manager.last_update > time0\n assert df.equals(manager.value)\n" ]
[ [ "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
hungyiwu/mixed-distance
[ "12d933d834ae79bf3512a688c31be760b4be9322" ]
[ "code/autoencoder.py" ]
[ "import tensorflow as tf\nimport tensorflow.keras as tfk\n\n\nclass conv_ae(tfk.Model):\n def __init__(\n self,\n input_shape: tuple,\n latent_dim: int,\n num_conv_layer: int,\n num_conv_filter: int,\n **kwargs\n ):\n super(conv_ae, self).__init__(**kwargs)\n conv_params = dict(\n filters=num_conv_filter,\n kernel_size=3,\n activation=\"relu\",\n padding=\"same\",\n strides=(1, 1),\n )\n\n # encoder\n encoder_input = tfk.Input(shape=input_shape)\n x = encoder_input\n\n for _ in range(num_conv_layer):\n x = tfk.layers.Conv2D(**conv_params)(x)\n x = tfk.layers.Conv2D(**conv_params)(x)\n x = tfk.layers.Conv2D(**conv_params)(x)\n x = tfk.layers.MaxPool2D(pool_size=(2, 2))(x)\n\n intermediate_shape = x.shape[1:]\n x = tfk.layers.Flatten()(x)\n encoder_output = tfk.layers.Dense(units=latent_dim)(x)\n self.encoder = tfk.Model(encoder_input, encoder_output, name=\"encoder\")\n\n # decoder\n decoder_input = tfk.Input(shape=(latent_dim,))\n x = tfk.layers.Dense(\n units=tf.math.reduce_prod(intermediate_shape), activation=\"relu\"\n )(decoder_input)\n x = tfk.layers.Reshape(intermediate_shape)(x)\n\n for _ in range(num_conv_layer):\n x = tfk.layers.Conv2DTranspose(**conv_params)(x)\n x = tfk.layers.Conv2DTranspose(**conv_params)(x)\n x = tfk.layers.Conv2DTranspose(**conv_params)(x)\n x = tfk.layers.UpSampling2D(size=(2, 2))(x)\n\n decoder_output = tfk.layers.Conv2DTranspose(\n filters=input_shape[2],\n kernel_size=3,\n activation=\"sigmoid\",\n padding=\"same\",\n strides=(1, 1),\n )(x)\n self.decoder = tfk.Model(decoder_input, decoder_output, name=\"decoder\")\n\n # loss fn\n self.loss_fn = tfk.losses.MAE\n\n def train_step(self, data):\n x, y = data\n with tf.GradientTape() as tape:\n y_pred = self.decoder(self.encoder(x))\n reconstruction_loss = tf.reduce_mean(self.loss_fn(y, y_pred))\n gradients = tape.gradient(reconstruction_loss, self.trainable_weights)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_weights))\n return {\"loss\": 
reconstruction_loss}\n\n def test_step(self, data):\n x, y = data\n y_pred = self.decoder(self.encoder(x))\n reconstruction_loss = tf.reduce_mean(self.loss_fn(y, y_pred))\n return {\"loss\": reconstruction_loss}\n\n def call(self, x):\n return self.decoder(self.encoder(x))\n" ]
[ [ "tensorflow.keras.Input", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2DTranspose", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.UpSampling2D", "tensorflow.keras.layers.MaxPool2D", "tensorflow.keras.Model", "tensorflow.keras.layers.Reshape", "tensorflow.keras.layers.Flatten", "tensorflow.math.reduce_prod", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
coolsidd/ActivityNet
[ "f442b853a483606654ed415369acf8b6dbd3e958" ]
[ "Crawler/Kinetics/download.py" ]
[ "import argparse\nimport glob\nimport json\nimport os\nimport shutil\nimport subprocess\nimport uuid\nimport csv\nfrom collections import OrderedDict\nfrom tqdm import tqdm\nfrom joblib import delayed\nfrom joblib import Parallel\nimport pandas as pd\n\n\ndef create_video_folders(dataset, output_dir, tmp_dir):\n \"\"\"Creates a directory for each label name in the dataset.\"\"\"\n if 'label-name' not in dataset.columns:\n this_dir = os.path.join(output_dir, 'test')\n dataset[\"label-name\"] = [0 for x in range(len(dataset))]\n if not os.path.exists(this_dir):\n os.makedirs(this_dir)\n # I should return a dict but ...\n return this_dir\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n\n label_to_dir = {}\n for label_name in dataset['label-name'].unique():\n this_dir = os.path.join(output_dir, label_name)\n if not os.path.exists(this_dir):\n os.makedirs(this_dir)\n label_to_dir[label_name] = this_dir\n return label_to_dir\n\n\ndef construct_video_filename(row, label_to_dir, trim_format='%06d'):\n \"\"\"Given a dataset row, this function constructs the\n output filename for a given video.\n \"\"\"\n basename = '%s_%s_%s.mp4' % (row['video-id'],\n trim_format % row['start-time'],\n trim_format % row['end-time'])\n if not isinstance(label_to_dir, dict):\n dirname = label_to_dir\n else:\n dirname = label_to_dir[row['label-name']]\n output_filename = os.path.join(dirname, basename)\n return output_filename\n\n\ndef download_clip(video_identifier, output_filename,\n start_time, end_time, out_height=256, out_width=-1,\n tmp_dir='/tmp/kinetics',\n # output = subprocess.check_output(command_resize, shell=True,\n # stderr=subprocess.STDOUT)\n num_attempts=5,\n url_base='https://www.youtube.com/watch?v='):\n \"\"\"Download a video from youtube if exists and is not blocked.\n\n arguments:\n ---------\n video_identifier: str\n Unique YouTube video identifier (11 characters)\n output_filename: str\n File 
path where the video will be stored.\n start_time: float\n Indicates the begining time in seconds from where the video\n will be trimmed.\n end_time: float\n Indicates the ending time in seconds of the trimmed video.\n \"\"\"\n # Defensive argument checking.\n assert isinstance(video_identifier, str), 'video_identifier must be string'\n assert isinstance(output_filename, str), 'output_filename must be string'\n assert len(video_identifier) == 11, 'video_identifier must have length 11'\n\n status = False\n # Construct command line for getting the direct video link.\n tmp_filename = os.path.join(tmp_dir,\n '%s.%%(ext)s' % uuid.uuid4())\n command = ['youtube-dl',\n '--quiet', '--no-warnings',\n '-f', 'mp4',\n '-o', '\"%s\"' % tmp_filename,\n '\"%s\"' % (url_base + video_identifier)]\n command = ' '.join(command)\n attempts = 0\n while True:\n try:\n output = subprocess.check_output(command, shell=True,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as err:\n attempts += 1\n if attempts == num_attempts:\n return status, err.output.decode(\"utf-8\")\n else:\n break\n\n tmp_filename = glob.glob('%s*' % tmp_filename.split('.')[0])[0]\n # Construct command to trim the videos (ffmpeg required).\n # Example command\n # ffmpeg -i /tmp/kinetics/11d602ad-b859-434a-8770-d120dd96f348.mp4 -ss 0 -t 10 -c:v libx264 -c:a copy -threads 1 Dataset/validate/washing feet/--GkrdYZ9Tc_000000_000010.mp4\n command = ['ffmpeg',\n '-i', '\"%s\"' % tmp_filename,\n '-ss', str(start_time),\n '-t', str(end_time - start_time),\n '-filter:v scale=\"trunc(oh*a/2)*2:%s\"' % out_height,\n '-c:v', 'libx264', '-c:a', 'copy',\n '-threads', '1',\n '-loglevel', 'panic',\n '\"%s\"' % output_filename]\n command = ' '.join(command)\n try:\n output = subprocess.check_output(command, shell=True,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as err:\n print(\"Exception!\")\n print(output.decode(\"utf-8\"))\n print(command)\n return status, err.output.decode(\"utf-8\")\n\n # 
Check if the video was successfully saved.\n status = os.path.exists(output_filename)\n os.remove(tmp_filename)\n return status, 'Downloaded'\n\n\ndef download_clip_wrapper(row, label_to_dir, trim_format, tmp_dir):\n \"\"\"Wrapper for parallel processing purposes.\"\"\"\n output_filename = construct_video_filename(row, label_to_dir,\n trim_format)\n \n clip_id = os.path.basename(output_filename).split('.mp4')[0]\n if os.path.exists(output_filename):\n status = tuple([clip_id, True, 'Exists', output_filename, row['label-name']])\n return status\n\n downloaded, log = download_clip(row['video-id'], output_filename,\n row['start-time'], row['end-time'],\n tmp_dir=tmp_dir)\n status = tuple([clip_id, downloaded, log, output_filename, row['label-name']])\n return status\n\n\ndef parse_kinetics_annotations(input_csv, ignore_is_cc=False):\n \"\"\"Returns a parsed DataFrame.\n\n arguments:\n ---------\n input_csv: str\n Path to CSV file containing the following columns:\n 'YouTube Identifier,Start time,End time,Class label'\n\n returns:\n -------\n dataset: DataFrame\n Pandas with the following columns:\n 'video-id', 'start-time', 'end-time', 'label-name'\n \"\"\"\n df = pd.read_csv(input_csv)\n if 'youtube_id' in df.columns:\n columns = OrderedDict([\n ('youtube_id', 'video-id'),\n ('time_start', 'start-time'),\n ('time_end', 'end-time'),\n ('label', 'label-name')])\n df.rename(columns=columns, inplace=True)\n if ignore_is_cc:\n df = df.loc[:, df.columns.tolist()[:-1]]\n return df\n\n\ndef main(input_csv, output_dir,\n trim_format='%06d', num_jobs=24, tmp_dir='/tmp/kinetics',\n drop_duplicates=False, total=-1, label_csv=None):\n\n # Reading and parsing Kinetics.\n dataset = parse_kinetics_annotations(input_csv)\n # if os.path.isfile(drop_duplicates):\n # print('Attempt to remove duplicates')\n # old_dataset = parse_kinetics_annotations(drop_duplicates,\n # ignore_is_cc=True)\n # df = pd.concat([dataset, old_dataset], axis=0, ignore_index=True)\n # 
df.drop_duplicates(inplace=True, keep=False)\n # print(dataset.shape, old_dataset.shape)\n # dataset = df\n # print(dataset.shape)\n if total == -1:\n total = len(dataset)\n dataset = dataset[:total]\n if label_csv is None or not os.path.exists(label_csv):\n import requests\n import io\n print(\"Downloading csv\")\n # TODO add input param for csv\n url = 'https://gist.githubusercontent.com/willprice/f19da185c9c5f32847134b87c1960769/raw/9dc94028ecced572f302225c49fcdee2f3d748d8/kinetics_400_labels.csv'\n r = requests.get(url, allow_redirects=True)\n labels_dict = {y:x for x,y in csv.reader(io.StringIO(r.content.decode(\"utf-8\")))}\n # print(labels_dict)\n else:\n with open(label_csv,\"r\") as labels_file:\n labels_dict = {y:x for x,y in csv.reader(labels_file)}\n labels_dict[0]=0\n # Creates folders where videos will be saved later.\n label_to_dir = create_video_folders(dataset, output_dir, tmp_dir)\n # Download all clips.\n csv_file = open(os.path.join(output_dir, \"dataset.csv\"), \"w\")\n if num_jobs == 1:\n status_lst = []\n for i, row in tqdm(dataset[:total].iterrows(),total=total):\n status_lst.append(download_clip_wrapper(row, label_to_dir,\n trim_format, tmp_dir))\n else:\n status_lst = Parallel(n_jobs=num_jobs)(delayed(download_clip_wrapper)(\n row, label_to_dir,\n trim_format, tmp_dir) for i, row in tqdm(dataset[:total].iterrows(),total=total))\n\n csv_writer = csv.DictWriter(csv_file, fieldnames=[\"path\",\"label\"])\n for status in status_lst:\n if status[1]:\n # print(\"writing row...\")\n csv_writer.writerow({\"path\":os.path.abspath(status[3]), \"label\":labels_dict[status[4]]})\n csv_file.close()\n # Clean tmp dir.\n if os.path.exists(tmp_dir):\n shutil.rmtree(tmp_dir)\n\n # Save download report.\n with open('download_report.json', 'w') as fobj:\n try:\n fobj.write(json.dumps(status_lst))\n except:\n print(status_lst)\n\n\nif __name__ == '__main__':\n description = 'Helper script for downloading and trimming kinetics videos.'\n p = 
argparse.ArgumentParser(description=description)\n p.add_argument('input_csv', type=str,\n help=('CSV file containing the following format: '\n 'YouTube Identifier,Start time,End time,Class label'))\n p.add_argument('output_dir', type=str,\n help='Output directory where videos will be saved.')\n p.add_argument('--label_csv', type=str, default=None,\n help=('CSV file containing the following format: '\n 'Id, Class label'),)\n p.add_argument('-f', '--trim-format', type=str, default='%06d',\n help=('This will be the format for the '\n 'filename of trimmed videos: '\n 'videoid_%0xd(start_time)_%0xd(end_time).mp4'))\n p.add_argument('-n', '--num-jobs', type=int, default=24)\n p.add_argument('-t', '--tmp-dir', type=str, default='/tmp/kinetics')\n p.add_argument('--total',type=int, default=-1)\n\n p.add_argument('--drop-duplicates', type=str, default='non-existent',\n help='Unavailable at the moment')\n # help='CSV file of the previous version of Kinetics.')\n main(**vars(p.parse_args()))\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
xKHUNx/xgboost
[ "e3aa7f1441e87b039a5db9a27e86bd60a017cd55" ]
[ "python-package/xgboost/dask.py" ]
[ "# pylint: disable=too-many-arguments, too-many-locals\n\"\"\"Dask extensions for distributed training. See\nhttps://xgboost.readthedocs.io/en/latest/tutorials/dask.html for simple\ntutorial. Also xgboost/demo/dask for some examples.\n\nThere are two sets of APIs in this module, one is the functional API including\n``train`` and ``predict`` methods. Another is stateful Scikit-Learner wrapper\ninherited from single-node Scikit-Learn interface.\n\nThe implementation is heavily influenced by dask_xgboost:\nhttps://github.com/dask/dask-xgboost\n\n\"\"\"\nimport platform\nimport logging\nfrom collections import defaultdict\nfrom threading import Thread\n\nimport numpy\n\nfrom . import rabit\n\nfrom .compat import DASK_INSTALLED\nfrom .compat import distributed_get_worker, distributed_wait, distributed_comm\nfrom .compat import da, dd, delayed, get_client\nfrom .compat import sparse, scipy_sparse\nfrom .compat import PANDAS_INSTALLED, DataFrame, Series, pandas_concat\nfrom .compat import CUDF_INSTALLED, CUDF_DataFrame, CUDF_Series, CUDF_concat\nfrom .compat import lazy_isinstance\n\nfrom .core import DMatrix, Booster, _expect\nfrom .training import train as worker_train\nfrom .tracker import RabitTracker\nfrom .sklearn import XGBModel, XGBRegressorBase, XGBClassifierBase\nfrom .sklearn import xgboost_model_doc\n\n# Current status is considered as initial support, many features are\n# not properly supported yet.\n#\n# TODOs:\n# - Callback.\n# - Label encoding.\n# - CV\n# - Ranking\n\n\nLOGGER = logging.getLogger('[xgboost.dask]')\n\n\ndef _start_tracker(host, n_workers):\n \"\"\"Start Rabit tracker \"\"\"\n env = {'DMLC_NUM_WORKER': n_workers}\n rabit_context = RabitTracker(hostIP=host, nslave=n_workers)\n env.update(rabit_context.slave_envs())\n\n rabit_context.start(n_workers)\n thread = Thread(target=rabit_context.join)\n thread.daemon = True\n thread.start()\n return env\n\n\ndef _assert_dask_support():\n if not DASK_INSTALLED:\n raise ImportError(\n 'Dask needs to 
be installed in order to use this module')\n if platform.system() == 'Windows':\n msg = 'Windows is not officially supported for dask/xgboost,'\n msg += ' contribution are welcomed.'\n LOGGER.warning(msg)\n\n\nclass RabitContext:\n '''A context controling rabit initialization and finalization.'''\n def __init__(self, args):\n self.args = args\n worker = distributed_get_worker()\n self.args.append(\n ('DMLC_TASK_ID=[xgboost.dask]:' + str(worker.address)).encode())\n\n def __enter__(self):\n rabit.init(self.args)\n LOGGER.debug('-------------- rabit say hello ------------------')\n\n def __exit__(self, *args):\n rabit.finalize()\n LOGGER.debug('--------------- rabit say bye ------------------')\n\n\ndef concat(value): # pylint: disable=too-many-return-statements\n '''To be replaced with dask builtin.'''\n if isinstance(value[0], numpy.ndarray):\n return numpy.concatenate(value, axis=0)\n if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):\n return scipy_sparse.vstack(value, format='csr')\n if sparse and isinstance(value[0], sparse.SparseArray):\n return sparse.concatenate(value, axis=0)\n if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):\n return pandas_concat(value, axis=0)\n if CUDF_INSTALLED and isinstance(value[0], (CUDF_DataFrame, CUDF_Series)):\n return CUDF_concat(value, axis=0)\n if lazy_isinstance(value[0], 'cupy.core.core', 'ndarray'):\n import cupy # pylint: disable=import-error\n # pylint: disable=c-extension-no-member,no-member\n d = cupy.cuda.runtime.getDevice()\n for v in value:\n d_v = v.device.id\n assert d_v == d, 'Concatenating arrays on different devices.'\n return cupy.concatenate(value, axis=0)\n return dd.multi.concat(list(value), axis=0)\n\n\ndef _xgb_get_client(client):\n '''Simple wrapper around testing None.'''\n if not isinstance(client, (type(get_client()), type(None))):\n raise TypeError(\n _expect([type(get_client()), type(None)], type(client)))\n ret = get_client() if client is None else client\n return 
ret\n\n\ndef _get_client_workers(client):\n workers = client.scheduler_info()['workers']\n return workers\n\n\nclass DaskDMatrix:\n # pylint: disable=missing-docstring, too-many-instance-attributes\n '''DMatrix holding on references to Dask DataFrame or Dask Array. Constructing\n a `DaskDMatrix` forces all lazy computation to be carried out. Wait for\n the input data explicitly if you want to see actual computation of\n constructing `DaskDMatrix`.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n client: dask.distributed.Client\n Specify the dask client used for training. Use default client\n returned from dask if it's set to None.\n data : dask.array.Array/dask.dataframe.DataFrame\n data source of DMatrix.\n label: dask.array.Array/dask.dataframe.DataFrame\n label used for trainin.\n missing : float, optional\n Value in the input data (e.g. `numpy.ndarray`) which needs\n to be present as a missing value. If None, defaults to np.nan.\n weight : dask.array.Array/dask.dataframe.DataFrame\n Weight for each instance.\n feature_names : list, optional\n Set names for features.\n feature_types : list, optional\n Set types for features\n\n '''\n\n def __init__(self,\n client,\n data,\n label=None,\n missing=None,\n weight=None,\n feature_names=None,\n feature_types=None):\n _assert_dask_support()\n client = _xgb_get_client(client)\n\n self.feature_names = feature_names\n self.feature_types = feature_types\n self.missing = missing\n\n if len(data.shape) != 2:\n raise ValueError(\n 'Expecting 2 dimensional input, got: {shape}'.format(\n shape=data.shape))\n\n if not isinstance(data, (dd.DataFrame, da.Array)):\n raise TypeError(_expect((dd.DataFrame, da.Array), type(data)))\n if not isinstance(label, (dd.DataFrame, da.Array, dd.Series,\n type(None))):\n raise TypeError(\n _expect((dd.DataFrame, da.Array, dd.Series), type(label)))\n\n self.worker_map = None\n self.has_label = label is not None\n self.has_weights = weight is not None\n\n client.sync(self.map_local_data, 
client, data, label, weight)\n\n async def map_local_data(self, client, data, label=None, weights=None):\n '''Obtain references to local data.'''\n\n def inconsistent(left, left_name, right, right_name):\n msg = 'Partitions between {a_name} and {b_name} are not ' \\\n 'consistent: {a_len} != {b_len}. ' \\\n 'Please try to repartition/rechunk your data.'.format(\n a_name=left_name, b_name=right_name, a_len=len(left),\n b_len=len(right)\n )\n return msg\n\n def check_columns(parts):\n # x is required to be 2 dim in __init__\n assert parts.ndim == 1 or parts.shape[1], 'Data should be' \\\n ' partitioned by row. To avoid this specify the number' \\\n ' of columns for your dask Array explicitly. e.g.' \\\n ' chunks=(partition_size, X.shape[1])'\n\n data = data.persist()\n if label is not None:\n label = label.persist()\n if weights is not None:\n weights = weights.persist()\n # Breaking data into partitions, a trick borrowed from dask_xgboost.\n\n # `to_delayed` downgrades high-level objects into numpy or pandas\n # equivalents.\n X_parts = data.to_delayed()\n if isinstance(X_parts, numpy.ndarray):\n check_columns(X_parts)\n X_parts = X_parts.flatten().tolist()\n\n if label is not None:\n y_parts = label.to_delayed()\n if isinstance(y_parts, numpy.ndarray):\n check_columns(y_parts)\n y_parts = y_parts.flatten().tolist()\n if weights is not None:\n w_parts = weights.to_delayed()\n if isinstance(w_parts, numpy.ndarray):\n check_columns(w_parts)\n w_parts = w_parts.flatten().tolist()\n\n parts = [X_parts]\n if label is not None:\n assert len(X_parts) == len(\n y_parts), inconsistent(X_parts, 'X', y_parts, 'labels')\n parts.append(y_parts)\n if weights is not None:\n assert len(X_parts) == len(\n w_parts), inconsistent(X_parts, 'X', w_parts, 'weights')\n parts.append(w_parts)\n parts = list(map(delayed, zip(*parts)))\n\n parts = client.compute(parts)\n await distributed_wait(parts) # async wait for parts to be computed\n\n for part in parts:\n assert part.status == 
'finished'\n\n self.partition_order = {}\n for i, part in enumerate(parts):\n self.partition_order[part.key] = i\n\n key_to_partition = {part.key: part for part in parts}\n who_has = await client.scheduler.who_has(\n keys=[part.key for part in parts])\n\n worker_map = defaultdict(list)\n for key, workers in who_has.items():\n worker_map[next(iter(workers))].append(key_to_partition[key])\n\n self.worker_map = worker_map\n\n def get_worker_x_ordered(self, worker):\n list_of_parts = self.worker_map[worker.address]\n client = get_client()\n list_of_parts_value = client.gather(list_of_parts)\n result = []\n for i, part in enumerate(list_of_parts):\n result.append((list_of_parts_value[i][0],\n self.partition_order[part.key]))\n return result\n\n def get_worker_parts(self, worker):\n '''Get mapped parts of data in each worker.'''\n list_of_parts = self.worker_map[worker.address]\n assert list_of_parts, 'data in ' + worker.address + ' was moved.'\n assert isinstance(list_of_parts, list)\n\n # `get_worker_parts` is launched inside worker. In dask side\n # this should be equal to `worker._get_client`.\n client = get_client()\n list_of_parts = client.gather(list_of_parts)\n\n if self.has_label:\n if self.has_weights:\n data, labels, weights = zip(*list_of_parts)\n else:\n data, labels = zip(*list_of_parts)\n weights = None\n else:\n data = [d[0] for d in list_of_parts]\n labels = None\n weights = None\n return data, labels, weights\n\n def get_worker_data(self, worker):\n '''Get data that local to worker.\n\n Parameters\n ----------\n worker: The worker used as key to data.\n\n Returns\n -------\n A DMatrix object.\n\n '''\n if worker.address not in set(self.worker_map.keys()):\n msg = 'worker {address} has an empty DMatrix. 
' \\\n 'All workers associated with this DMatrix: {workers}'.format(\n address=worker.address,\n workers=set(self.worker_map.keys()))\n LOGGER.warning(msg)\n d = DMatrix(numpy.empty((0, 0)),\n feature_names=self.feature_names,\n feature_types=self.feature_types)\n return d\n\n data, labels, weights = self.get_worker_parts(worker)\n\n data = concat(data)\n\n if self.has_label:\n labels = concat(labels)\n else:\n labels = None\n if self.has_weights:\n weights = concat(weights)\n else:\n weights = None\n dmatrix = DMatrix(data,\n labels,\n weight=weights,\n missing=self.missing,\n feature_names=self.feature_names,\n feature_types=self.feature_types,\n nthread=worker.nthreads)\n return dmatrix\n\n def get_worker_data_shape(self, worker):\n '''Get the shape of data X in each worker.'''\n data, _, _ = self.get_worker_parts(worker)\n\n shapes = [d.shape for d in data]\n rows = 0\n cols = 0\n for shape in shapes:\n rows += shape[0]\n\n c = shape[1]\n assert cols in (0, c), 'Shape between partitions are not the' \\\n ' same. Got: {left} and {right}'.format(left=c, right=cols)\n cols = c\n return (rows, cols)\n\n\ndef _get_rabit_args(worker_map, client):\n '''Get rabit context arguments from data distribution in DaskDMatrix.'''\n host = distributed_comm.get_address_host(client.scheduler.address)\n\n env = client.run_on_scheduler(_start_tracker, host.strip('/:'),\n len(worker_map))\n rabit_args = [('%s=%s' % item).encode() for item in env.items()]\n return rabit_args\n\n# train and predict methods are supposed to be \"functional\", which meets the\n# dask paradigm. But as a side effect, the `evals_result` in single-node API\n# is no longer supported since it mutates the input parameter, and it's not\n# intuitive to sync the mutation result. Therefore, a dictionary containing\n# evaluation history is instead returned.\n\n\ndef train(client, params, dtrain, *args, evals=(), **kwargs):\n '''Train XGBoost model.\n\n .. 
versionadded:: 1.0.0\n\n Parameters\n ----------\n client: dask.distributed.Client\n Specify the dask client used for training. Use default client\n returned from dask if it's set to None.\n \\\\*\\\\*kwargs:\n Other parameters are the same as `xgboost.train` except for\n `evals_result`, which is returned as part of function return value\n instead of argument.\n\n Returns\n -------\n results: dict\n A dictionary containing trained booster and evaluation history.\n `history` field is the same as `eval_result` from `xgboost.train`.\n\n .. code-block:: python\n\n {'booster': xgboost.Booster,\n 'history': {'train': {'logloss': ['0.48253', '0.35953']},\n 'eval': {'logloss': ['0.480385', '0.357756']}}}\n\n '''\n _assert_dask_support()\n client = _xgb_get_client(client)\n if 'evals_result' in kwargs.keys():\n raise ValueError(\n 'evals_result is not supported in dask interface.',\n 'The evaluation history is returned as result of training.')\n\n workers = list(_get_client_workers(client).keys())\n\n rabit_args = _get_rabit_args(workers, client)\n\n def dispatched_train(worker_addr):\n '''Perform training on a single worker.'''\n LOGGER.info('Training on %s', str(worker_addr))\n worker = distributed_get_worker()\n with RabitContext(rabit_args):\n local_dtrain = dtrain.get_worker_data(worker)\n\n local_evals = []\n if evals:\n for mat, name in evals:\n if mat is dtrain:\n local_evals.append((local_dtrain, name))\n continue\n local_mat = mat.get_worker_data(worker)\n local_evals.append((local_mat, name))\n\n local_history = {}\n local_param = params.copy() # just to be consistent\n msg = 'Overriding `nthreads` defined in dask worker.'\n if 'nthread' in local_param.keys() and \\\n local_param['nthread'] is not None and \\\n local_param['nthread'] != worker.nthreads:\n msg += '`nthread` is specified. 
' + msg\n LOGGER.warning(msg)\n elif 'n_jobs' in local_param.keys() and \\\n local_param['n_jobs'] is not None and \\\n local_param['n_jobs'] != worker.nthreads:\n msg = '`n_jobs` is specified. ' + msg\n LOGGER.warning(msg)\n else:\n local_param['nthread'] = worker.nthreads\n bst = worker_train(params=local_param,\n dtrain=local_dtrain,\n *args,\n evals_result=local_history,\n evals=local_evals,\n **kwargs)\n ret = {'booster': bst, 'history': local_history}\n if local_dtrain.num_row() == 0:\n ret = None\n return ret\n\n futures = client.map(dispatched_train,\n workers,\n pure=False,\n workers=workers)\n results = client.gather(futures)\n return list(filter(lambda ret: ret is not None, results))[0]\n\n\ndef predict(client, model, data, *args, missing=numpy.nan):\n '''Run prediction with a trained booster.\n\n .. note::\n\n Only default prediction mode is supported right now.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n client: dask.distributed.Client\n Specify the dask client used for training. Use default client\n returned from dask if it's set to None.\n model: A Booster or a dictionary returned by `xgboost.dask.train`.\n The trained model.\n data: DaskDMatrix/dask.dataframe.DataFrame/dask.array.Array\n Input data used for prediction.\n missing: float\n Used when input data is not DaskDMatrix. 
Specify the value\n considered as missing.\n\n Returns\n -------\n prediction: dask.array.Array/dask.dataframe.Series\n\n '''\n _assert_dask_support()\n client = _xgb_get_client(client)\n if isinstance(model, Booster):\n booster = model\n elif isinstance(model, dict):\n booster = model['booster']\n else:\n raise TypeError(_expect([Booster, dict], type(model)))\n if not isinstance(data, (DaskDMatrix, da.Array, dd.DataFrame)):\n raise TypeError(_expect([DaskDMatrix, da.Array, dd.DataFrame],\n type(data)))\n\n def mapped_predict(partition, is_df):\n worker = distributed_get_worker()\n m = DMatrix(partition, missing=missing, nthread=worker.nthreads)\n predt = booster.predict(m, *args, validate_features=False)\n if is_df:\n predt = DataFrame(predt, columns=['prediction'])\n return predt\n\n if isinstance(data, da.Array):\n predictions = client.submit(\n da.map_blocks,\n mapped_predict, data, False, drop_axis=1,\n dtype=numpy.float32\n ).result()\n return predictions\n if isinstance(data, dd.DataFrame):\n predictions = client.submit(\n dd.map_partitions,\n mapped_predict, data, True,\n meta=dd.utils.make_meta({'prediction': 'f4'})\n ).result()\n return predictions.iloc[:, 0]\n\n # Prediction on dask DMatrix.\n worker_map = data.worker_map\n\n def dispatched_predict(worker_id):\n '''Perform prediction on each worker.'''\n LOGGER.info('Predicting on %d', worker_id)\n worker = distributed_get_worker()\n list_of_parts = data.get_worker_x_ordered(worker)\n predictions = []\n booster.set_param({'nthread': worker.nthreads})\n for part, order in list_of_parts:\n local_x = DMatrix(part,\n feature_names=data.feature_names,\n feature_types=data.feature_types,\n missing=data.missing,\n nthread=worker.nthreads)\n predt = booster.predict(data=local_x,\n validate_features=local_x.num_row() != 0,\n *args)\n ret = (delayed(predt), order)\n predictions.append(ret)\n return predictions\n\n def dispatched_get_shape(worker_id):\n '''Get shape of data in each worker.'''\n LOGGER.info('Trying 
to get data shape on %d', worker_id)\n worker = distributed_get_worker()\n list_of_parts = data.get_worker_x_ordered(worker)\n shapes = []\n for part, order in list_of_parts:\n shapes.append((part.shape, order))\n return shapes\n\n def map_function(func):\n '''Run function for each part of the data.'''\n futures = []\n for wid in range(len(worker_map)):\n list_of_workers = [list(worker_map.keys())[wid]]\n f = client.submit(func, wid,\n pure=False,\n workers=list_of_workers)\n futures.append(f)\n\n # Get delayed objects\n results = client.gather(futures)\n results = [t for l in results for t in l] # flatten into 1 dim list\n # sort by order, l[0] is the delayed object, l[1] is its order\n results = sorted(results, key=lambda l: l[1])\n results = [predt for predt, order in results] # remove order\n return results\n\n results = map_function(dispatched_predict)\n shapes = map_function(dispatched_get_shape)\n\n # Constructing a dask array from list of numpy arrays\n # See https://docs.dask.org/en/latest/array-creation.html\n arrays = []\n for i, shape in enumerate(shapes):\n arrays.append(da.from_delayed(results[i], shape=(shape[0], ),\n dtype=numpy.float32))\n predictions = da.concatenate(arrays, axis=0)\n return predictions\n\n\ndef inplace_predict(client, model, data,\n iteration_range=(0, 0),\n predict_type='value',\n missing=numpy.nan):\n '''Inplace prediction.\n\n .. versionadded:: 1.1.0\n\n Parameters\n ----------\n client: dask.distributed.Client\n Specify the dask client used for training. Use default client\n returned from dask if it's set to None.\n model: Booster/dict\n The trained model.\n iteration_range: tuple\n Specify the range of trees used for prediction.\n predict_type: str\n * 'value': Normal prediction result.\n * 'margin': Output the raw untransformed margin value.\n missing: float\n Value in the input data which needs to be present as a missing\n value. 
If None, defaults to np.nan.\n Returns\n -------\n prediction: dask.array.Array\n '''\n _assert_dask_support()\n client = _xgb_get_client(client)\n if isinstance(model, Booster):\n booster = model\n elif isinstance(model, dict):\n booster = model['booster']\n else:\n raise TypeError(_expect([Booster, dict], type(model)))\n if not isinstance(data, (da.Array, dd.DataFrame)):\n raise TypeError(_expect([da.Array, dd.DataFrame], type(data)))\n\n def mapped_predict(data, is_df):\n worker = distributed_get_worker()\n booster.set_param({'nthread': worker.nthreads})\n prediction = booster.inplace_predict(\n data,\n iteration_range=iteration_range,\n predict_type=predict_type,\n missing=missing)\n if is_df:\n if lazy_isinstance(data, 'cudf.core.dataframe', 'DataFrame'):\n import cudf # pylint: disable=import-error\n prediction = cudf.DataFrame({'prediction': prediction},\n dtype=numpy.float32)\n else:\n # If it's from pandas, the partition is a numpy array\n prediction = DataFrame(prediction, columns=['prediction'],\n dtype=numpy.float32)\n return prediction\n\n if isinstance(data, da.Array):\n predictions = client.submit(\n da.map_blocks,\n mapped_predict, data, False, drop_axis=1,\n dtype=numpy.float32\n ).result()\n return predictions\n if isinstance(data, dd.DataFrame):\n predictions = client.submit(\n dd.map_partitions,\n mapped_predict, data, True,\n meta=dd.utils.make_meta({'prediction': 'f4'})\n ).result()\n return predictions.iloc[:, 0]\n\n\ndef _evaluation_matrices(client, validation_set, sample_weights, missing):\n '''\n Parameters\n ----------\n validation_set: list of tuples\n Each tuple contains a validation dataset including input X and label y.\n E.g.:\n\n .. code-block:: python\n\n [(X_0, y_0), (X_1, y_1), ... 
]\n\n sample_weights: list of arrays\n The weight vector for validation data.\n\n Returns\n -------\n evals: list of validation DMatrix\n '''\n evals = []\n if validation_set is not None:\n assert isinstance(validation_set, list)\n for i, e in enumerate(validation_set):\n w = (sample_weights[i]\n if sample_weights is not None else None)\n dmat = DaskDMatrix(client=client, data=e[0], label=e[1], weight=w,\n missing=missing)\n evals.append((dmat, 'validation_{}'.format(i)))\n else:\n evals = None\n return evals\n\n\nclass DaskScikitLearnBase(XGBModel):\n '''Base class for implementing scikit-learn interface with Dask'''\n\n _client = None\n\n # pylint: disable=arguments-differ\n def fit(self,\n X,\n y,\n sample_weights=None,\n eval_set=None,\n sample_weight_eval_set=None,\n verbose=True):\n '''Fit the regressor.\n\n Parameters\n ----------\n X : array_like\n Feature matrix\n y : array_like\n Labels\n sample_weight : array_like\n instance weights\n eval_set : list, optional\n A list of (X, y) tuple pairs to use as validation sets, for which\n metrics will be computed.\n Validation metrics will help us track the performance of the model.\n sample_weight_eval_set : list, optional\n A list of the form [L_1, L_2, ..., L_n], where each L_i is a list\n of group weights on the i-th validation set.\n verbose : bool\n If `verbose` and an evaluation set is used, writes the evaluation\n metric measured on the validation set to stderr.'''\n raise NotImplementedError\n\n def predict(self, data): # pylint: disable=arguments-differ\n '''Predict with `data`.\n Parameters\n ----------\n data: data that can be used to construct a DaskDMatrix\n Returns\n -------\n prediction : dask.array.Array'''\n raise NotImplementedError\n\n @property\n def client(self):\n '''The dask client used in this model.'''\n client = _xgb_get_client(self._client)\n return client\n\n @client.setter\n def client(self, clt):\n self._client = clt\n\n@xgboost_model_doc(\"\"\"Implementation of the Scikit-Learn API 
for XGBoost.\"\"\",\n ['estimators', 'model'])\nclass DaskXGBRegressor(DaskScikitLearnBase, XGBRegressorBase):\n # pylint: disable=missing-docstring\n def fit(self,\n X,\n y,\n sample_weights=None,\n eval_set=None,\n sample_weight_eval_set=None,\n verbose=True):\n _assert_dask_support()\n dtrain = DaskDMatrix(client=self.client,\n data=X, label=y, weight=sample_weights,\n missing=self.missing)\n params = self.get_xgb_params()\n evals = _evaluation_matrices(self.client,\n eval_set, sample_weight_eval_set,\n self.missing)\n\n results = train(self.client, params, dtrain,\n num_boost_round=self.get_num_boosting_rounds(),\n evals=evals, verbose_eval=verbose)\n # pylint: disable=attribute-defined-outside-init\n self._Booster = results['booster']\n # pylint: disable=attribute-defined-outside-init\n self.evals_result_ = results['history']\n return self\n\n def predict(self, data): # pylint: disable=arguments-differ\n _assert_dask_support()\n test_dmatrix = DaskDMatrix(client=self.client, data=data,\n missing=self.missing)\n pred_probs = predict(client=self.client,\n model=self.get_booster(), data=test_dmatrix)\n return pred_probs\n\n\n@xgboost_model_doc(\n 'Implementation of the scikit-learn API for XGBoost classification.',\n ['estimators', 'model']\n)\nclass DaskXGBClassifier(DaskScikitLearnBase, XGBClassifierBase):\n # pylint: disable=missing-docstring\n _client = None\n\n def fit(self,\n X,\n y,\n sample_weights=None,\n eval_set=None,\n sample_weight_eval_set=None,\n verbose=True):\n _assert_dask_support()\n dtrain = DaskDMatrix(client=self.client,\n data=X, label=y, weight=sample_weights,\n missing=self.missing)\n params = self.get_xgb_params()\n\n # pylint: disable=attribute-defined-outside-init\n if isinstance(y, (da.Array)):\n self.classes_ = da.unique(y).compute()\n else:\n self.classes_ = y.drop_duplicates().compute()\n self.n_classes_ = len(self.classes_)\n\n if self.n_classes_ > 2:\n params[\"objective\"] = \"multi:softprob\"\n params['num_class'] = 
self.n_classes_\n else:\n params[\"objective\"] = \"binary:logistic\"\n\n evals = _evaluation_matrices(self.client,\n eval_set, sample_weight_eval_set,\n self.missing)\n results = train(self.client, params, dtrain,\n num_boost_round=self.get_num_boosting_rounds(),\n evals=evals, verbose_eval=verbose)\n self._Booster = results['booster']\n # pylint: disable=attribute-defined-outside-init\n self.evals_result_ = results['history']\n return self\n\n def predict(self, data): # pylint: disable=arguments-differ\n _assert_dask_support()\n test_dmatrix = DaskDMatrix(client=self.client, data=data,\n missing=self.missing)\n pred_probs = predict(client=self.client,\n model=self.get_booster(), data=test_dmatrix)\n return pred_probs\n" ]
[ [ "numpy.concatenate", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dmitryshendryk/mask_rcnn_carplate
[ "c0b8de45f3ce95712140259a7ad520e118dee0ed" ]
[ "workspace/eval_new.py" ]
[ "import os\nimport sys\nimport time\nimport numpy as np\nimport imgaug # https://github.com/aleju/imgaug (pip3 install imgaug)\nimport json\nimport skimage\nimport cv2\nfrom mrcnn import visualize\nfrom PIL import ImageEnhance\nimport matplotlib.pyplot as plt\nfrom mrcnn.config import Config\nfrom mrcnn import model as modellib, utils\n\nclass lp_Config(Config):\n NAME = \"plate\"\n IMAGES_PER_GPU = 1\n NUM_CLASSES =2 # COCO has 80 classes\n\n STEPS_PER_EPOCH = 100\n BACKBONE = 'resnet101'\n\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n IMAGE_MIN_DIM = int(480)\n IMAGE_MAX_DIM = int(640)\n RPN_ANCHOR_SCALES = (16,24,32,48,64)\n RPN_ANCHOR_RATIOS = [ 1, 3,6 ]\n MEAN_PIXEL = np.array([123.7, 116.8, 103.9])\n\n DETECTION_NMS_THRESHOLD =0.5\n DETECTION_MIN_CONFIDENCE = 0.5\n RPN_NMS_THRESHOLD = 0.5\n TRAIN_ROIS_PER_IMAGE = 200\n RPN_TRAIN_ANCHORS_PER_IMAGE=256\n\nclass char_Config(Config):\n\n NAME = \"char\"\n IMAGES_PER_GPU = 1\n NUM_CLASSES =34 # COCO has 80 classes\n\n STEPS_PER_EPOCH = 100\n BACKBONE = 'resnet101'\n\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n RPN_NMS_THRESHOLD = 0.5\n DETECTION_MIN_CONFIDENCE = 0\n DETECTION_NMS_THRESHOLD = 0.6\n\n\n IMAGE_MIN_DIM = int(256)\n IMAGE_MAX_DIM = int(640)\n\n\n\n\ndef space_NMS(box_a,box_b):#((x1,y1),(x2,y2))\n width_a=abs(box_a[0][0]-box_a[1][0])\n width_b=abs(box_b[0][0]-box_b[1][0])\n height_a=abs(box_a[0][1]-box_a[1][1])\n height_b=abs(box_b[0][1]-box_b[1][1])\n size_a=width_a*height_a\n size_b=width_b*height_b\n start_x=max(box_a[0][0],box_b[0][0])\n end_x=min(box_a[1][0],box_b[1][0])\n start_y = max(box_a[0][1], box_b[0][1])\n end_y= min(box_a[1][1], box_b[1][1])\n\n #size_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])\n center_a=((box_a[0][0]+box_a[1][0])/2,(box_a[0][1]+box_a[1][1])/2)\n center_b=((box_b[0][0]+box_b[1][0])/2,(box_b[0][1]+box_b[1][1])/2)\n if start_x>end_x or start_y>end_y:\n #no overlap\n #print(center_a,center_b)\n return False\n else:\n\n # 
overlapsize=((width_a+width_b)-2*(abs(center_a[0]-center_b[0])))*((height_a+height_b)-2*(abs(center_a[1]-center_b[1])))\n # overlapsize=(0.5*(width_a+width_b)-(center_b[0]-center_a[0]))*(0.5*(height_a+height_b)-(center_b[1]-center_a[1]))\n overlapsize=abs(end_x-start_x)*abs(end_y-start_y)\n #print(\"overlapsize: \", overlapsize, \" size_b: \", size_b)\n if overlapsize>=0.7*size_b or overlapsize>=0.7*size_a:\n\n return True\n else:\n return False\n\n\ndef aggregate(line,labels,scores,boxs,h_thershold):\n opt_label=[]\n temps=[]\n #print(line,labels,scores,boxs)\n sum_score = 0\n while(len(line)):\n mark = []\n pos=line[0][0]\n label=labels[0]\n score=scores[0]\n box=boxs[0]\n #mark.append(0)\n\n for i in range(1,len(line),1):\n if not space_NMS(box,boxs[i]):\n mark.append(i)\n elif scores[i]>score:\n #print(\"label: \", label)\n label=labels[i]\n score=scores[i]\n else:\n #print(\"label: \",labels[i])\n continue\n newline=[]\n newlabels=[]\n newscores=[]\n newbox=[]\n #print(mark)\n\n for i in mark:\n newline.append(line[i])\n newlabels.append(labels[i])\n newscores.append(scores[i])\n\n newbox.append(boxs[i])\n line=newline\n labels=newlabels\n scores=newscores\n boxs=newbox\n sum_score +=score\n temps.append((pos,label))\n #mark.clear()\n temps.sort(key=lambda tu:tu[0])\n for t in temps:\n opt_label.append(t[1])\n return opt_label,sum_score\nimport skimage.transform as st\nimport math\n\ndef find_line(point_img):\n h, theta, d = st.hough_line(point_img)\n k = -1\n # in same theata the difference of d should less than the thersehold\n b_sum = 9999\n for j in range(h.shape[1]): # d\n all_dis = h[:, j]\n\n previous = -1\n alldis = []\n\n for i in range(len(all_dis)):\n apperance = all_dis[i]\n while (apperance):\n alldis.append(d[i])\n apperance -= 1\n temp_d = alldis[0]\n sum = 0\n for i in range(1, len(alldis), 1):\n sum += abs(alldis[i] - alldis[i - 1])\n temp_d+=alldis[i]\n if sum < b_sum:\n k = theta[j]\n b = temp_d/len(alldis)\n b_sum = sum\n\n return 
k,b\n\ndef Seperate_V(centers, imgsize, boxs, scores, labels):\n output_lines = []\n output_labels = []\n output_boxs = []\n output_scores = []\n if (len(centers) < 2):\n return output_lines, output_labels, output_scores, output_boxs\n point_img = np.zeros((imgsize[0], imgsize[1]))\n\n for center in centers:\n point_img[int(center[1]), int(center[0])] = 255\n # cv2.imshow(\" \", point_img)\n # cv2.waitKey(0)\n h, theta, d = st.hough_line(point_img)\n k = -1\n b = []\n\n # in same theata the difference of d should less than the thersehold\n first_line = []\n second_line = []\n average = 9999\n\n left = list(range(0, 60, 1))\n right = list(range(120, 180, 1))\n\n pos_angle = left + right\n # 在可能的角度内去寻找一个最窄的range\n # print(pos_angle)\n # print(theta/(3.141592658)*180)\n\n #for j in range(h.shape[1]):\n for j in pos_angle:\n all_dis = h[:, j]\n\n previous = -1\n alldis = []\n\n for i in range(len(all_dis)):\n apperance = all_dis[i]\n while (apperance):\n alldis.append(d[i])\n apperance -= 1\n th = 2 # 不允许超过0.1\n count = 0\n #print(\"alldis\",alldis)\n temp_d = [alldis[0]]\n sum = 0\n for i in range(1, len(alldis), 1):\n sum += abs(alldis[i] - alldis[i - 1])\n if abs(alldis[i] - alldis[i - 1]) > th:\n temp_d.append(alldis[i])\n count += 1\n temp_average = sum / len(alldis)\n if count <= 1 and temp_average < average:\n k = theta[j]\n b = temp_d\n average = temp_average\n # if count<=1:\n # #print(j,temp_d)\n # k=j\n # b=temp_d\n # break\n\n print(k,b)\n if not len(b):\n return output_lines, output_labels, output_scores, output_boxs\n if len(b) == 1:\n output_lines = [centers]\n output_boxs = [boxs]\n output_labels = [labels]\n output_scores = [scores]\n else:\n if k == 0:\n k = 1\n cos = math.cos(k)\n sin = math.sin(k)\n output_lines = [[], []]\n output_labels = [[], []]\n output_boxs = [[], []]\n output_scores = [[], []]\n for i in range(len(centers)):\n # print(cos/sin*i[0]+b[0]/sin,cos/sin*i[0]+b[1]/sin)\n if abs(centers[i][1] + cos / sin * centers[i][0] - b[0] / sin) 
> abs(\n centers[i][1] + cos / sin * centers[i][0] - b[1] / sin):\n output_lines[0].append(centers[i])\n output_labels[0].append(labels[i])\n output_boxs[0].append(boxs[i])\n output_scores[0].append(scores[i])\n else:\n output_lines[1].append(centers[i])\n output_labels[1].append(labels[i])\n output_boxs[1].append(boxs[i])\n output_scores[1].append(scores[i])\n\n\n\n\n #以下分别对上下两排的边缘进行检测\n check=[]\n\n for index in range(len(output_lines)):\n all=[]\n chas=[]\n for i in range(len(output_lines[index])):\n\n temp=[output_lines[index][i],output_labels[index][i],output_boxs[index][i],output_scores[index][i]]\n all.append(temp)\n if len(all)<3:\n check.append(all)\n continue\n #all=zip(line,label,box,score)\n all.sort(key=lambda p:p[0][0])\n # 去除明显高度不对的box\n # average_heights=sum(t[2][1][1]-t[2][0][1] for t in all )/len(all)\n # for t in all:\n\n\n\n\n #NMS\n mark=[]\n prev = all[0]\n for k in range(1,len(all),1):\n now=all[k]\n if space_NMS(now[2],prev[2]):\n if now[3]>prev[3]:\n mark.append(k-1)\n prev=now\n else:\n mark.append(k)\n else:\n prev=now\n new=[]\n for i in range(len(all)):\n if not i in mark:\n new.append(all[i])\n\n all=new\n\n\n\n left=None\n right=None\n print(all)\n if (all[0][1]=='1' or all[0][1]=='J' or all[0][1]=='Y' or all[0][1]=='T' or all[0][1]=='7'):\n left=all[0]\n if all[len(all)-1][1]=='1' or all[len(all)-1][1]=='J' or all[len(all)-1][1]=='Y' or all[len(all)-1][1]=='T' or all[len(all)-1][1]=='7':\n right=all[len(all)-1]\n\n start=0\n end=len(all)\n if left:\n start=1\n if right:\n end=len(all)-1\n center=all[start:end]\n\n if len(center)<2:\n check.append(all)\n continue\n average_height = np.sum(t[2][1][1] - t[2][0][1] for t in center) / len(center)\n point_img = np.zeros((imgsize[0], imgsize[1]))\n for p in center:\n point_img[int(p[0][1]), int(p[0][0])] = 255\n # cv2.imshow(\" \", point_img)\n # cv2.waitKey(0)\n k, b = find_line(point_img)\n cos = math.cos(k)\n sin = math.sin(k)\n if left :\n left_point=left[0]\n height = abs(left[2][0][1] 
- left[2][1][1])\n print(\"left cal_y\", abs(cos / sin * left_point[0] - b / sin), \"real y:\", left_point[1])\n print(\"height \",height,\" average_height\",average_height)\n if abs(height - average_height )> average_height * 0.3:\n left = None\n elif abs(abs(left_point[1])-abs(cos / sin *left_point[0] - b / sin))>1.5 and left[3]<0.98:\n\n left=None\n else:\n print(\"left score: \",\n left[3])\n\n else:\n print(\"left is clear\")\n\n if right:\n right_point=right[0]\n height = abs(right[2][0][1] - right[2][1][1])\n print(\"right cal_y\",abs( cos / sin *right_point[0] - b / sin),\"real y:\",right_point[1])\n print(\"height \", height, \" average_height\", average_height)\n if abs(height - average_height )> average_height * 0.3:\n right = None\n\n\n elif abs(abs(right_point[1])-abs (cos / sin * right_point[0] - b / sin)) > 1.5 and right[3]<0.98:\n right = None\n else:\n print(\"right score: \", right[3] )\n\n else:\n print(\"right is clear\")\n temp=center\n\n\n if left:\n temp=[left]+center\n if right:\n temp+=[right]\n check.append(temp)\n #print(\"result is\",check)\n output_lines=[]\n output_labels=[]\n output_boxs=[]\n output_scores=[]\n for i in check:\n line=[]\n label=[]\n box=[]\n score=[]\n for j in i:\n line.append(j[0])\n label.append(j[1])\n #print(\"j is \",j)\n box.append(j[2])\n score.append(j[3])\n output_lines.append(line)\n output_labels.append(label)\n output_boxs.append(box)\n output_scores.append(score)\n\n\n\n return output_lines, output_labels, output_scores, output_boxs\n\n\n\n\n\n\n\n\n\n\n\n\n # print(first_line,\" \",second_line)\n\n # fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(8, 6))\n # plt.tight_layout()\n #\n # # 显示原始图片\n # ax0.imshow(point_img, plt.cm.gray)\n # ax0.set_title('Input image')\n # ax0.set_axis_off()\n #\n # # 显示hough变换所得数据\n # ax1.imshow(np.log(1 + h))\n # ax1.set_title('Hough transform')\n # ax1.set_xlabel('Angles (degrees)')\n # ax1.set_ylabel('Distance (pixels)')\n # ax1.axis('image')\n #\n # # row1, col1 = 
point_img.shape\n # # for _, angle, dist in zip(*st.hough_line_peaks(h, theta, d)):\n # # y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)\n # # y1 = (dist - col1 * np.cos(angle)) / np.sin(angle)\n # # ax2.plot((0, col1), (y0, y1), '-r')\n # # ax2.axis((0, col1, row1, 0))\n # # ax2.set_title('Detected lines')\n # # ax2.set_axis_off()\n # #\n # # plt.show()\n #\n #\n #\n # #ax2.imshow(point_img, plt.cm.gray)\n # row1, col1 = point_img.shape\n # print(row1,col1)\n # angle=k\n # for dist in b:\n # y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)\n #\n # y1 = (dist - col1 * np.cos(angle)) / np.sin(angle)\n # print(y0, y1)\n # ax2.plot((0, col1), (y0, y1), '-r')\n # ax2.axis((0, col1, row1, 0))\n # ax2.set_title('Detected lines')\n # ax2.set_axis_off()\n # plt.show()\n\ndef sequence(labels,boxs,scores,v_thershold,h_thershold,size=0):\n #first determine wether the car plate is two lines\n is_two_lines=False\n\n centers=[]\n for box in boxs:\n center=[(box[0][0]+box[1][0])/2.0,(box[0][1]+box[1][1])/2.0]\n centers.append(center)\n # check y\n la=[]\n sc=[]\n lines=[]\n all_boxes=[]\n output=[]\n #print(centers,labels,scores)\n\n lines,la,sc,all_boxes=Seperate_V(centers,size,boxs,scores,labels)\n # for i in range(len(centers)):\n # center=centers[i]\n # cur_la=labels[i]\n # cur_sc=scores[i]\n # cur_box=boxs[i]\n # if len(lines)==0: #first\n # line_one=[]\n # label_one=[]\n # sc_one=[]\n # box_one=[]\n #\n # line_one.append(center)\n # lines.append(line_one)\n #\n # label_one.append(cur_la)\n # la.append(label_one)\n #\n # sc_one.append(cur_sc)\n # sc.append(sc_one)\n #\n # box_one.append(cur_box)\n # all_boxes.append(box_one)\n #\n # else:\n # new_lines=True\n # for i in range(len(lines)):\n # is_new_line=True\n # for k in range(len(lines[i])):\n # if abs(center[1]-lines[i][k][1])<v_thershold:\n # lines[i].append(center)\n # la[i].append(cur_la)\n # sc[i].append(cur_sc)\n # all_boxes[i].append(cur_box)\n # is_new_line=False\n # break\n # if not is_new_line:\n # 
new_lines=False\n # break\n # if new_lines:\n # new_line = []\n # new_label = []\n # new_score = []\n # new_box=[]\n #\n # new_line.append(center)\n # lines.append(new_line)\n #\n # new_label.append(cur_la)\n # la.append(new_label)\n #\n # new_score.append(cur_sc)\n # sc.append(new_score)\n #\n # new_box.append(cur_box)\n # all_boxes.append(new_box)\n #\n # #erase the out_lair\n\n newline=lines\n newscores=sc\n newlabels=la\n newboxs=all_boxes\n # for i in range(len(lines)):\n # line=lines[i]\n # score=sc[i]\n # label=la[i]\n # c_box=all_boxes[i]\n # #print(c_box)\n # if len(line)>=2: #at least 2\n # newline.append(line)\n # newscores.append(score)\n # newlabels.append(label)\n # newboxs.append(c_box)\n #determine x\n sum_score=0\n for i in range(len(newline)):\n\n line=newline[i]\n label_line=newlabels[i]\n score_line=newscores[i]\n box_line=newboxs[i]\n code,line_score=aggregate(line,label_line,score_line,box_line,h_thershold)\n sum_score+=line_score\n output.append(code)\n count = 0\n #print(\"sum...\",sum_score)\n for l in newline:\n count+=len(l)\n if not count:\n average_score=0\n else:\n average_score=sum_score/count\n\n if len(output)>2:\n #print(output)\n output=[]\n average_score=0\n return output,average_score\n\n\ndef get_lp_result(image, boxes, masks, class_ids, class_names,\n scores=None, title=\"PLC\",\n figsize=(16, 16), ax=None,\n show_mask=True, show_bbox=True,\n colors=None, captions=None,\n score_threshold=0.8,show_score=True):\n \"\"\"\n boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.\n masks: [height, width, num_instances]\n class_ids: [num_instances]\n class_names: list of class names of the dataset\n scores: (optional) confidence scores for each box\n title: (optional) Figure title\n show_mask, show_bbox: To show masks and bounding boxes or not\n figsize: (optional) the size of the image\n colors: (optional) An array or colors to use with each object\n captions: (optional) A list of strings to use as captions for each 
object\n \"\"\"\n # Number of instances\n N = boxes.shape[0]\n global pcl_container\n global total_count\n if not N:\n\n print(\"\\n*** No License plate been detected *** \\n\")\n return \"\",0\n else:\n assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\n\n scoreMin=score_threshold\n\n max_car_plate_score = -1\n car_plate_pos = []\n all_pos=[]\n all_sc=[]\n for i in range(N):\n if scores[i]>scoreMin:\n #print(class_names[class_ids[i]]+' scores:', scores[i])\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in image cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n all_pos.append([x1,y1,x2,y2])\n all_sc.append(scores[i])\n sc = scores[i]\n if sc>max_car_plate_score:\n car_plate_pos=[x1, y1, x2, y2]\n max_car_plate_score=sc\n #print(\"debug..........................\",max_car_plate_score)\n return all_pos,all_sc\n # if len(car_plate_pos):\n # return [car_plate_pos]\n # else:\n # return []\n\n\n\n\n\ndef get_char_result(image, boxes, masks, class_ids, class_names,\n scores=None, title=\"PLC\",\n figsize=(16, 16), ax=None,\n show_mask=True, show_bbox=True,\n colors=None, captions=None,\n score_threshold=0.8,show_score=True):\n\n N = boxes.shape[0]\n #print(N)\n global pcl_container\n global total_count\n if not N:\n print(\"\\n*** No Char been detected *** \\n\")\n return \"\",0\n else:\n assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\n\n total_height=0\n for i in range(N):\n y1, x1, y2, x2 = boxes[i]\n height=y2-y1\n total_height+=height\n average_height=total_height/N\n colors = visualize.get_colors(38)\n #print(colors)\n scoreMin=score_threshold\n ls=[]\n bs=[]\n ss=[]\n\n for i in range(N):\n if scores[i]>scoreMin:\n #print(class_names[class_ids[i]]+' scores:', scores[i])\n color=(1,1,1)\n # Bounding box\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. 
Likely lost in image cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n class_id = class_ids[i]\n sc = scores[i]\n label=class_names[class_id]\n box=[(x1,y1),(x2,y2)]\n bs.append(box)\n ls.append(label)\n ss.append(sc)\n\n\n\n v_t=average_height*0.6\n h_t=average_height\n res,average_score=sequence(ls,bs,ss,v_t,h_t,image.shape)\n first=\"\"\n sec=\"\"\n if len(res)>1:\n for c in res[0]:\n first+=c\n for d in res[1]:\n sec+=d\n if len(first)>len(sec):\n temp=first\n first=sec\n sec=temp\n elif len(res)==1:\n if res[0]!=\"\":\n for d in res[0]:\n sec += d\n print(first,sec)\n res=\"\"\n if len(first+sec)>=3 and len(first+sec)<=7:\n if len(first)>1:\n res=first+\"_\"+sec\n else:\n res=sec\n\n return res,average_score\n\n\n\ndef detect(model, image_path, Min_score,type=\"detect_lp\",img=None):\n\n if np.all(img)==None:\n image=cv2.imread(image_path)\n # if image.shape[0]<200 or image.shape[1]<200:\n # s = (2 * image.shape[1], 2 * image.shape[0])\n # image = cv2.resize(src=image, dsize=s, interpolation=cv2.INTER_LANCZOS4)\n else:\n image=cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\n # if type==\"detect_chars\":\n # s = (2 * image.shape[1], 2 * image.shape[0])\n # image = cv2.resize(src=image, dsize=s, interpolation=cv2.INTER_LANCZOS4)\n if len(image.shape)<3:\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n image = image[..., ::-1]\n # cv2.imshow(\" \",image)\n # cv2.waitKey(0)\n t1=time.time()\n #print(model.config.NUM_CLASSES)\n r=model.detect([image],verbose=1)[0]\n print(\"detect time\",time.time()-t1)\n\n if type==\"detect_lp\":\n class_names=[\"BG\",\"car_plate\"]\n #result=np.empty(0)\n #should also return the scores\n all_car_plate_pos,all_scores=get_lp_result(image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'], show_bbox=True, score_threshold=Min_score,\n show_mask=False)\n all_images=[]\n all_boxs=[]\n if(len(all_car_plate_pos)):\n for car_plate_pos in all_car_plate_pos:\n image=image.astype(np.uint8).copy()\n #give some padding\n if 
car_plate_pos[1]-2>=0:\n car_plate_pos[1]-=2\n if car_plate_pos[3]+2<=image.shape[0]:\n car_plate_pos[3]+=2\n if car_plate_pos[0]-2>=0:\n car_plate_pos[0]-=2\n if car_plate_pos[2]+2<=image.shape[1]:\n car_plate_pos[2]+=2\n\n output=image[car_plate_pos[1]:car_plate_pos[3],car_plate_pos[0]:car_plate_pos[2]]\n all_images.append(output)\n all_boxs.append(car_plate_pos)\n\n # cv2.imshow(\" \",output)\n # cv2.waitKey(0)\n return all_images,image,all_scores,all_boxs\n\n\n else:\n\n class_names = [\"BG\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"8\",\n \"9\",\n \"A\",\n \"B\",\n \"C\",\n \"D\",\n \"E\",\n \"F\",\n \"G\",\n \"H\",\n \"J\",\n \"K\",\n \"L\",\n \"M\",\n \"N\",\n \"P\",\n \"R\",\n \"S\",\n \"T\",\n \"U\",\n \"V\",\n \"W\",\n \"X\",\n \"Y\",\n \"Z\",\n ]\n #result can be either the characters on the plate or the plate itself depends on the mode\n\n result,score = get_char_result(image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'], show_bbox=True, score_threshold=Min_score,\n show_mask=False)\n return result,score\n\ndef load_model(lp_path,char_path):\n lp_model=0\n char_model=0\n lp_config=lp_Config()\n lp_config.display()\n char_config=char_Config()\n\n\n\n if lp_path:\n lp_model=modellib.MaskRCNN(mode=\"inference\",config=lp_config,model_dir=\"./logs\")\n print(\"loading LP weights from path->\"+lp_path)\n lp_model.load_weights(lp_path,by_name=True)\n\n if char_path:\n char_model=modellib.MaskRCNN(mode=\"inference\",config=char_config,model_dir=\"./logs\")\n print(\"loading CHAR weights from path ->\"+char_path)\n char_model.load_weights(char_path,by_name=True)\n\n return lp_model,char_model\n\n\ndef process(lp_model,char_model,folder_path,show_result=False,log_path=\"\"):\n #if have lp model the image should be the car\n image_names=os.listdir(folder_path)\n if not len(image_names):\n print(\"empty folder !!!!!\")\n return {}\n #lps = []\n chars ={}\n correct_count=0\n clear_count=0\n 
correct_clear_count=0\n file = open(\"/home/dmitry/Documents/Projects/mask_rcnn_carplate/benchmark_reslut.txt\", 'w')\n c_file=open(\"/home/dmitry/Documents/Projects/mask_rcnn_carplate/benchmark_reslut_clear.txt\", 'w')\n c_image_pathe=\"/home/dmitry/Documents/Projects/mask_rcnn_carplate/normal_image\"\n for image_name in image_names:\n if \".png\" in image_name or \".jpg\" in image_name or \".jepg\" in image_name:\n image_dir=folder_path+\"/\"+image_name\n #temp = cv2.imread(image_dir)\n t_lp=[]\n contend=[]\n if lp_model:\n print(\"running on image {}\".format(image_name))\n best_one=[\"\",np.zeros(1),-1]#label pos score\n lps,temp,plate_score,all_boxs=detect(lp_model, image_path=image_dir, Min_score=0.50,type=\"detect_lp\")\n\n log=image_name+\" \"\n if not len(lps):\n chars[image_name]=\"no lp in the image\"\n log+=\"no lp been detected\"\n print(\"no lp\")\n # elif lp.shape[0]<20 or lp.shape[1]<10:\n # chars[image_name]=\" resolution too low\"\n else:\n all_fail=True\n for (lp,sc,box) in zip(lps,plate_score,all_boxs):\n if lp.shape[0]<10 or lp.shape[1]<5:\n continue\n char,average_char_score=detect(char_model,image_path=image_dir,Min_score=0.5,type=\"detect_char\",img=lp)\n\n if not len(char):\n print(\"bad angel or bad clarity \")\n chars[image_name]=\"bad angel or bad clarity\"\n\n all_fail=False\n else:\n #print(\"plate_contend................\",char)\n print(\"average_char_score: \",average_char_score,\"average_plate_sc: \",sc)\n if average_char_score+sc> best_one[2]:\n best_one = [char, lp, average_char_score+sc ,box]\n print(\"char\", char)\n\n t_lp.append(lp)\n contend.append(char)\n\n all_fail = False\n\n else:\n char,average_char_score=detect(char_model,image_path=image_dir,type=\"detect_char\")\n if not char:\n chars[image_name] = \"bad angel or bad clarity\"\n else:\n chars[image_name] = char\n\n\n #cv2.imshow(\"org\",temp)\n #if np.any(t_lp):\n\n # if len(contend):\n # for (c,l) in zip(contend,t_lp):\n # cv2.imshow(c,l)\n is_clear = True\n mark = 
[\"hide\", \"blur\", \"font\", \"ang\"]\n for m in mark:\n if m in image_name:\n is_clear = False\n break\n if is_clear:\n clear_count += 1\n if best_one[2]!=-1:\n #print(best_one[3])\n car_plate_pos=best_one[3]\n temp = cv2.rectangle(temp, (car_plate_pos[0], car_plate_pos[1]),\n (car_plate_pos[2], car_plate_pos[3]),\n (100, 20, 100), thickness=2)\n #txt=best_one[0].replace('_','')\n height=car_plate_pos[3]-car_plate_pos[1]\n cv2.putText(temp, best_one[0], (car_plate_pos[0], car_plate_pos[1] - int(20*height/80)), cv2.FONT_HERSHEY_PLAIN, 1.2*height/45, (0, 0, 255), 1)\n cv2.imwrite(c_image_pathe+'/'+image_name,temp)\n\n\n if best_one[2]!=-1:\n #cv2.imshow(best_one[0],best_one[1])\n\n w_r=\"wrong\"\n if best_one[0] in image_name:\n beg=image_name.find(best_one[0])\n if (beg==0 or image_name[beg-1]=='-') and (image_name[beg+len(best_one[0])]==\".\"or image_name[beg+len(best_one[0])]==\"_\" or image_name[beg+len(best_one[0])]==\"-\"):\n correct_count += 1\n if is_clear:\n correct_clear_count += 1\n w_r=\"correct\"\n log +=\"{}_result : {}\".format(w_r,best_one[0])\n print(best_one[0],image_name,w_r)\n chars[image_name] = best_one[0]\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n elif log!=image_name+\" \":\n log += \"less than 3 characters been detected\"\n file.writelines(log + \"\\n\")\n if (is_clear):\n c_file.writelines(log + \"\\n\")\n else:\n chars[image_name]=\"wrong format\"\n\n\n print(\"total image {}, {} is clear image,{} is bad image\".format(len(image_names),clear_count,len(image_names)-clear_count))\n print(\"reach {} percent accuaracy in all img\".format(correct_count/len(image_names)*100))\n print(\"reach {} percent accuaracy in clear img\".format(correct_clear_count / clear_count * 100))\n print(\"reach {} percent accuaracy in bad img\".format((correct_count-correct_clear_count) / (len(image_names)-clear_count) * 100))\n\n file.writelines(\"total image {}, {} is clear image,{} is bad image\\n\".format(len(image_names), clear_count,\n len(image_names) - 
clear_count))\n file.writelines(\"reach {} percent accuaracy\\n\".format(correct_count / len(image_names) * 100))\n file.writelines(\"reach {} percent accuaracy in clear img\\n\".format(correct_clear_count / clear_count * 100))\n file.writelines(\"reach {} percent accuaracy in bad img\\n\".format(\n (correct_count - correct_clear_count) / (len(image_names) - clear_count) * 100))\n return chars\n\n\nif __name__==\"__main__\":\n lp_path=\"/home/dmitry/Documents/Projects/mask_rcnn_carplate/lp_model.h5\"\n\n #char_path=\"/home/jianfenghuang/Desktop/VAL_LOG/PCL_LOGS/plc20181204T1204/mask_rcnn_plc_0189.h5\"\n char_path =\"/home/dmitry/Documents/Projects/mask_rcnn_carplate/mask_rcnn_plc_0999.h5\"\n #char_path = \"/home/jianfenghuang/Desktop/weights/this_is_the_best_char_weight.h5\"\n #char_path =\"/home/jianfenghuang/Desktop/weights/best_char_1214.h5\"\n # char_path=\"/home/jianfenghuang/Desktop/weights/mask_rcnn_plc_0513.h5\"\n\n # char_path=\"/home/jianfenghuang/Desktop/VAL_LOG/PCL_LOGS/plc20181218T1637/mask_rcnn_plc_0535.h5\" #535\n # char_path=\"/home/jianfenghuang/Desktop/VAL_LOG/PCL_LOGS/plc20181219T1758/mask_rcnn_plc_0300.h5\"\n #char_path='/home/jianfenghuang/Desktop/VAL_LOG/PCL_LOGS/plc20181220T1723/mask_rcnn_plc_0644.h5'\n lp_model,char_model=load_model(lp_path,char_path)\n results=process(lp_model,char_model,\"/home/dmitry/Documents/Projects/mask_rcnn_carplate/real_data/benchmark_folder\")\n #print (\"result .........................\")\n # for pairs in results:\n # print(pairs,results[pairs])" ]
[ [ "numpy.all", "numpy.any", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nohamanona/poke-auto-fuka
[ "9d355694efa0168738795afb403fc89264dcaeae" ]
[ "stm/volatile_class_get.py" ]
[ "import cv2\r\nimport numpy as np\r\nimport serial\r\nfrom stm.send_serial import SendSerial\r\n\r\nclass VolatileClassGet(object):\r\n def __init__(self):\r\n self.next_state = 'None'\r\n self.send_command = 'None'\r\n self._control_frame_count =0\r\n self.hatched_egg =0\r\n self.number_of_egg = 0\r\n self.one_egg_img = cv2.imread(\"data\\\\one_egg.png\")\r\n self.not_egg = 0\r\n\r\n def state_action(self, frame ,key , num_egg, htc_egg):\r\n self.hatched_egg =htc_egg\r\n self.number_of_egg = num_egg\r\n self.action_frame = frame\r\n self.get_action(frame)\r\n\r\n def get_action(self,frame):\r\n #print(self._control_frame_count,'GET frame')\r\n if self._control_frame_count == 0:\r\n self.send_command = 'Button A'\r\n self._control_frame_count += 1\r\n elif self._control_frame_count == 80:\r\n self.send_command = 'Button A'\r\n self._control_frame_count += 1\r\n elif self._control_frame_count == 320:\r\n self.send_command = 'Button A'\r\n self._control_frame_count += 1\r\n elif self._control_frame_count == 450:\r\n self.send_command = 'Button A'\r\n self._control_frame_count += 1\r\n elif self._control_frame_count == 550:\r\n self.send_command = 'Button A'\r\n self._control_frame_count +=1\r\n elif self._control_frame_count == 670:\r\n self.not_egg = self.detect_egg(frame)\r\n self.send_command = 'HAT BOTTOM'\r\n self._control_frame_count +=1\r\n elif self._control_frame_count == 680:\r\n if self.not_egg >= 2:\r\n self.send_command = 'HAT BOTTOM'\r\n self._control_frame_count +=1\r\n elif self._control_frame_count == 690:\r\n if self.not_egg >= 3:\r\n self.send_command = 'HAT BOTTOM'\r\n self._control_frame_count +=1\r\n elif self._control_frame_count == 700:\r\n if self.not_egg >= 4:\r\n self.send_command = 'HAT BOTTOM'\r\n self._control_frame_count +=1\r\n elif self._control_frame_count == 710:\r\n if self.not_egg >= 5:\r\n self.send_command = 'HAT BOTTOM'\r\n self._control_frame_count +=1\r\n elif self._control_frame_count == 720:\r\n self.send_command = 
'Button A'\r\n self._control_frame_count +=1\r\n self.number_of_egg +=1\r\n elif self._control_frame_count == 900:\r\n self.send_command = 'Button A'\r\n self._control_frame_count +=1\r\n elif self._control_frame_count == 1000:\r\n self.send_command = 'Button A'\r\n self._control_frame_count +=1\r\n elif self._control_frame_count == 1050:\r\n self.send_command = 'None'\r\n self._control_frame_count =0\r\n self.next_state = 'RUN'\r\n\r\n else:\r\n self.send_command = 'None'\r\n self._control_frame_count += 1\r\n\r\n def detect_egg(self, frame):\r\n detect_egg = 0\r\n egg_tmprate = np.int8(self.one_egg_img)\r\n egg1 = np.int8(frame[209:295,57:450,:])\r\n egg2 = np.int8(frame[305:391,57:450,:])\r\n egg3 = np.int8(frame[401:487,57:450,:])\r\n egg4 = np.int8(frame[497:583,57:450,:])\r\n egg5 = np.int8(frame[593:679,57:450,:])\r\n egg1_dif = np.amax(abs(egg_tmprate - egg1))\r\n egg2_dif = np.amax(abs(egg_tmprate - egg2))\r\n egg3_dif = np.amax(abs(egg_tmprate - egg3))\r\n egg4_dif = np.amax(abs(egg_tmprate - egg4))\r\n egg5_dif = np.amax(abs(egg_tmprate - egg5))\r\n\r\n if egg1_dif > 10:\r\n detect_egg = 1\r\n elif egg2_dif > 10:\r\n detect_egg = 2\r\n elif egg3_dif > 10:\r\n detect_egg = 3\r\n elif egg4_dif > 10:\r\n detect_egg = 4\r\n elif egg5_dif > 10:\r\n detect_egg = 5\r\n print('detect egg = ',detect_egg)\r\n return detect_egg" ]
[ [ "numpy.int8" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rishavsen1/transit-simulator
[ "e5ae747b304243f8fed06e34b99f9b0d61547e5a" ]
[ "manual_files/codes/process.py" ]
[ "import dask.dataframe as dd\nimport pandas as pd\n## trajectory for all vehicles during the simulation time interval\nmotion = dd.read_csv(\"trajectories_outputmotionState.csv\",sep=';',low_memory=False)\nprint(\"motion file imported. length\",motion.shape[0])\nvehtype = pd.read_csv(\"trajectories_outputactorConfig.csv\",sep=';')\nprint('actor config imported. lenthi', vehtype.shape[0])\nvehref = pd.read_csv(\"trajectories_outputvehicle.csv\",sep=';')\nprint('vehref imported. length', vehref.shape[0])\n# extract the output values for buses\nvehref['vehicle_ref'] = vehref['vehicle_ref'].astype('str')\nbus=vehref[vehref['vehicle_ref'].apply(lambda x: len(x)>20)]\nbusref=bus[['vehicle_ref','vehicle_id','vehicle_actorConfig']]\nbusref= busref.rename(columns={'vehicle_actorConfig' : 'actorConfig_id'})\nprint('busref',busref.shape[0])\n# join busref and vehtype by the same column 'actorConfig_id'\nbusinfo=pd.merge(busref, vehtype, on='actorConfig_id')\ntraj=motion.loc[motion.motionState_vehicle.isin(businfo.vehicle_id) ]\ntraj=traj[['motionState_vehicle','motionState_time','motionState_speed','motionState_acceleration']]\n# traj=traj.sort_values(['motionState_vehicle','motionState_time'])\ntraj=traj.rename(columns={'motionState_vehicle' : 'vehicle_id','motionState_time':'time','motionState_speed':'speed',\n 'motionState_acceleration':'acceleration'})\nprint('traj',traj.shape[0])\n# UNIT: time:milliseconds, speed:0.01m/s, acceleration:0.0001m/s^2\ntrajectory=dd.merge(traj, businfo, on='vehicle_id')\ntrajectory=trajectory.drop(['vehicle_id'],axis=1)\nprint(trajectory.columns)\ndef write_file(grp):\n pc = grp[\"vehicle_ref\"].unique()[0]\n pc = pc.replace(':','')\n grp.to_csv(f\"./outtest/\"+ 'Trajectory_' + pc + \".csv\",\n header=False,\n index=False)\n return None\n\n\ntrajectory.groupby('vehicle_ref').apply(write_file, meta=('x', 'f8')).compute()\n#group dataframe into multiple dataframe as a dict by bus 
name\n#trajectory=dict(tuple(trajectory.groupby('vehicle_ref')))\n#write in csv files, bus trip name as the file name\n#for key, df in trajectory.items():\n# bus=key.replace(':','')\n# with open('./outtest/' + 'Trajectory_' + bus + '.csv', 'w', newline='') as oFile:\n# df.to_csv(oFile, index = False)\n# print(\"Finished writing: \" + 'Trajectory_' + bus)\n\n" ]
[ [ "pandas.merge", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
wassname/transfer-learning-conv-ai
[ "879fedf136cc84d93c41f3fdfa58c6f42857f796" ]
[ "interact_server.py" ]
[ "# # Copyright (c) 2019-present, HuggingFace Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\npython interact_server.py --max_history 4 --top_p 0.8 --fp16 O2 --model_checkpoint runs/Jul19_14-38-58_ip-172-31-39-133_goood\n\"\"\"\nimport logging\nimport random\nfrom argparse import ArgumentParser\nfrom itertools import chain\nfrom pathlib import Path\nfrom pprint import pformat\nimport time\nfrom fuzzywuzzy import fuzz\n\nimport zmq\nimport coloredlogs\nimport crayons\nimport torch\nimport json\nimport torch.nn.functional as F\nimport collections\n\nfrom data import MJC_FINETUNED_MODEL, download_targz_to_folder\nfrom pytorch_pretrained_bert import (GPT2LMHeadModel, GPT2Tokenizer,\n OpenAIGPTLMHeadModel, OpenAIGPTTokenizer)\nfrom train import SPECIAL_TOKENS, build_input_from_segments\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__file__)\ncoloredlogs.install(logging.DEBUG)\nlogging.getLogger('zmqtest').setLevel(logging.DEBUG)\n\nTOPICS = [str(i) for i in range(1, 1000)]\n\ndef mogrify(topic, msg):\n \"\"\" json encode the message and prepend the topic \"\"\"\n logger.debug(f\"mogrify: topic={topic} msg={msg}\")\n return topic + ' ' + json.dumps(msg)\n\ndef demogrify(topicmsg):\n \"\"\" Inverse of mogrify() \"\"\"\n json0 = topicmsg.find('{')\n topic = topicmsg[0:json0].strip()\n msg = json.loads(topicmsg[json0:])\n logger.debug(f\"demogrify: topic={topic} msg={msg}\")\n return topic, msg \n \nclass ModelAPI(object):\n \"\"\"Client api obj.\"\"\"\n def __init__(self, port=5586):\n port = int(port)\n # Zeromq to pytorch server \n context = zmq.Context()\n self.topic = random.choice(TOPICS)\n self.socket_out = context.socket(zmq.PUB)\n self.socket_out.connect(\"tcp://localhost:%s\" % port)\n logging.info(f\"zmq PUB to {port}\")\n\n self.socket_in = context.socket(zmq.SUB)\n self.socket_in.connect(\"tcp://localhost:%s\" % 
(port+1))\n self.socket_in.setsockopt_string(zmq.SUBSCRIBE, self.topic)\n self.socket_in.setsockopt_string(zmq.SUBSCRIBE, 'serverconfig')\n logging.info(f\"zmq SUB to {port+1}, topic={self.topic}\")\n\n logger.info(\"Asking and waiting for initial server config\")\n time.sleep(1)\n self.socket_out.send_string(mogrify('serverconfig', {})) \n topic, msg = demogrify(self.socket_in.recv_string())\n assert topic=='serverconfig'\n self.server_config = msg\n logger.info(\"Connected to server, received initial message: %s\", self.server_config)\n\n self.history = collections.defaultdict(list)\n self.personalities = self.server_config[\"training_args\"][\"subreddit\"]\n\n def reset(self, name):\n self.history[name] = []\n return f'<reset memory of {name}>'\n\n def roast(self, reply, name, personality=None):\n # return '$ROAST'\n self.history[name].append(reply)\n if personality is None:\n # Choose a random conditional personality from training options\n personality = random.choice(self.server_config[\"training_args\"][\"subreddit\"])\n payload = dict(personality=personality, history=self.history[name])\n logger.debug(\"payload %s\", payload)\n\n self.socket_out.send_string(mogrify(self.topic, payload)) \n topic = None\n while topic != self.topic:\n topic, msg = demogrify(self.socket_in.recv_string())\n \n reply = msg[\"data\"]\n \n # To avoid looping 5% chance of forgetting all, 25% change of not remembering what it said\n if random.random()<5:\n self.history[name] = [] \n elif random.random()<25:\n pass\n else: \n self.history[name].append(reply)\n\n # Keep history at managable length\n self.history[name] = self.history[name][-10:]\n return reply\n\n\ndef top_filtering(\n logits, top_k=0, top_p=0.0, threshold=-float(\"Inf\"), filter_value=-float(\"Inf\")\n):\n \"\"\" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering\n Args:\n logits: logits distribution shape (vocabulary size)\n top_k: <=0: no filtering, >0: keep only top k tokens with 
highest probability.\n top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset\n whose total probability mass is greater than or equal to the threshold top_p.\n In practice, we select the highest probability tokens whose cumulative probability mass exceeds\n the threshold top_p.\n threshold: a minimal threshold to keep logits\n \"\"\"\n assert (\n logits.dim() == 1\n ) # Only work for batch size 1 for now - could update but it would obfuscate a bit the code\n top_k = min(top_k, logits.size(-1))\n if top_k > 0:\n # Remove all tokens with a probability less than the last token in the top-k tokens\n indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\n logits[indices_to_remove] = filter_value\n\n if top_p > 0.0:\n # Compute cumulative probabilities of sorted tokens\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n cumulative_probabilities = torch.cumsum(\n F.softmax(sorted_logits, dim=-1), dim=-1\n )\n\n # Remove tokens with cumulative probability above the threshold\n sorted_indices_to_remove = cumulative_probabilities > top_p\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n\n # Back to unsorted indices and set them to -infinity\n indices_to_remove = sorted_indices[sorted_indices_to_remove]\n logits[indices_to_remove] = filter_value\n\n indices_to_remove = logits < threshold\n logits[indices_to_remove] = filter_value\n\n return logits\n\n\ndef sample_sequence(personality, history, tokenizer, model, args, current_output=None):\n special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)\n if current_output is None:\n current_output = []\n\n for i in range(args.max_length):\n authors = [str(i%2) for i in range(len(history))]\n instance, sequence = build_input_from_segments(\n personality, history, current_output, 
authors, tokenizer, with_eos=False, max_len=1024\n )\n\n input_ids = torch.tensor(instance[\"input_ids\"], device=args.device).unsqueeze(0)\n token_type_ids = torch.tensor(\n instance[\"token_type_ids\"], device=args.device\n ).unsqueeze(0)\n\n logits = model(input_ids, token_type_ids=token_type_ids)\n\n if \"gpt2\" == args.model:\n logits = logits[0]\n logits = logits[0, -1, :] / args.temperature\n logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)\n probs = F.softmax(logits, dim=-1)\n\n prev = (\n torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)\n )\n if i < args.min_length and prev.item() in special_tokens_ids:\n # Sometimes the model fails to abide by the min output length, lets try only 20 times to avoid a inf loop\n for j in range(20):\n if prev.item() in special_tokens_ids:\n prev = torch.multinomial(probs, num_samples=1)\n else:\n break\n\n if prev.item() in special_tokens_ids:\n break\n current_output.append(prev.item())\n\n return current_output\n\n\ndef run():\n parser = ArgumentParser()\n parser.add_argument(\n \"--model\", type=str, default=\"gpt2\", help=\"Model type (gpt or gpt2)\"\n )\n parser.add_argument(\n \"--model_checkpoint\",\n type=str,\n default=\"\",\n help=\"Path, url or short name of the model\",\n )\n parser.add_argument(\n \"--max_history\",\n type=int,\n default=20,\n help=\"Number of previous utterances to keep in history\",\n )\n parser.add_argument(\n \"--device\",\n type=str,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n help=\"Device (cuda or cpu)\",\n )\n parser.add_argument(\n \"--fp16\",\n type=str,\n default=\"\",\n help=\"Set to O0, O1, O2 or O3 for fp16 training (see apex documentation). Try O2. 
Note first char is the letter 'oh'\",\n )\n parser.add_argument(\n \"--no_sample\",\n action=\"store_true\",\n help=\"Set to use greedy decoding instead of sampling\",\n )\n parser.add_argument(\n \"--max_length\",\n type=int,\n default=200,\n help=\"Maximum length of the output utterances\",\n )\n parser.add_argument(\n \"--port\",\n type=int,\n default=5586,\n help=\"zeromq port\",\n )\n parser.add_argument(\n \"--min_length\",\n type=int,\n default=20,\n help=\"Minimum length of the output utterances\",\n )\n parser.add_argument(\"--seed\", type=int, default=None, help=\"Seed\")\n parser.add_argument(\n \"--temperature\", type=int, default=0.7, help=\"Sampling softmax temperature\"\n )\n parser.add_argument(\n \"--top_k\",\n type=int,\n default=0,\n help=\"Filter top-k tokens before sampling (<=0: no filtering)\",\n )\n parser.add_argument(\n \"--top_p\",\n type=float,\n default=0.6,\n help=\"Nucleus filtering (top-p) before sampling (<=0.0: no filtering)\",\n )\n args = parser.parse_args()\n\n model_training_args = Path(args.model_checkpoint).joinpath(\n \"model_training_args.bin\"\n )\n training_args = torch.load(model_training_args.open(\"rb\"))\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__file__)\n logger.info(pformat(args))\n\n context = zmq.Context()\n logger.info(f\"bind ZMQ SUB on port {args.port}\")\n socket_in = context.socket(zmq.SUB)\n socket_in.bind(\"tcp://127.0.0.1:%s\" % args.port)\n socket_in.setsockopt_string(zmq.SUBSCRIBE, 'serverconfig')\n for topic in TOPICS:\n socket_in.setsockopt_string(zmq.SUBSCRIBE, topic)\n\n logger.info(f\"bind ZMQ PUB on port {args.port+1}\")\n socket_out = context.socket(zmq.PUB)\n socket_out.bind(\"tcp://127.0.0.1:%s\" % (args.port+1))\n\n time.sleep(1)\n logger.info(f\"zmq ready you can now start clients on port {args.port}\")\n server_config = dict(args=args.__dict__, training_args=training_args.__dict__) \n # socket_out.send_string(mogrify(\"serverconfig\", server_config))\n\n if 
args.model_checkpoint == \"\":\n args.model_checkpoint = download_targz_to_folder(MJC_FINETUNED_MODEL)\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.random.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n\n logger.info(\"Get pretrained model and tokenizer\")\n tokenizer_class = GPT2Tokenizer if \"gpt2\" == args.model else OpenAIGPTTokenizer\n tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)\n model_class = GPT2LMHeadModel if \"gpt2\" == args.model else OpenAIGPTLMHeadModel\n model = model_class.from_pretrained(args.model_checkpoint)\n\n model.to(args.device)\n model.eval()\n\n if args.fp16:\n from apex import amp # Apex is only required if we use fp16 training\n model = amp.initialize(model, opt_level=args.fp16)\n\n logger.info(\"Sample a personality\")\n personalities_str = getattr(training_args, \"subreddit\", [])\n personalities = [\n [tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))]\n for obj in personalities_str\n ]\n if not personalities:\n raise FileNotFoundError(\n f\"Could not load personalities from file {model_training_args}\"\n )\n personality = random.choice(personalities)\n print(\"training personalities\", [tokenizer.decode(chain(*p)) for p in personalities])\n\n \n # context = zmq.Context()\n # logger.info(f\"bind ZMQ SUB on port {args.port}\")\n # socket_in = context.socket(zmq.SUB)\n # socket_in.bind(\"tcp://127.0.0.1:%s\" % args.port)\n # socket_in.setsockopt(zmq.SUBSCRIBE, '0')\n # for topic in TOPICS:\n # socket_in.setsockopt(zmq.SUBSCRIBE, topic)\n\n # logger.info(f\"bind ZMQ PUB on port {args.port+1}\")\n # socket_out = context.socket(zmq.PUB)\n # socket_out.bind(\"tcp://127.0.0.1:%s\" % (args.port+1))\n\n # time.sleep(1)\n # server_config = dict(args=args.__dict__, training_args=training_args.__dict__) \n # socket_out.send_string(mogrify(\"serverconfig\", server_config)) \n\n def encode(s):\n return tokenizer.encode(s)[:1024]\n\n while True:\n logger.info('ZMQ waiting to receive')\n 
topic, msg = demogrify(socket_in.recv_string())\n if topic == 'serverconfig':\n socket_out.send_string(mogrify(\"serverconfig\", server_config))\n else: \n try:\n logger.debug('msg received %s', msg)\n with torch.no_grad():\n personality = [encode(msg['personality'])]\n history = [encode(h) for h in msg['history']]\n out_ids = sample_sequence(personality, history, tokenizer, model, args)\n out_text = tokenizer.decode(out_ids, skip_special_tokens=True)\n socket_out.send_string(mogrify(topic, dict(data=out_text)))\n time.sleep(1)\n except Exception as e:\n logger.warn(\"Error while processing message: %s\", e)\n socket_out.send_string(mogrify(topic, dict(data=f\"ERROR TOO MUCH ROAST: {e}\")))\n\n\nif __name__ == \"__main__\":\n run()\n" ]
[ [ "torch.nn.functional.softmax", "torch.cuda.manual_seed", "torch.random.manual_seed", "torch.multinomial", "torch.tensor", "torch.no_grad", "torch.sort", "torch.cuda.is_available", "torch.topk" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
maoyingxue/meterReader
[ "bf1fe1858e084f2c4e8f82346bdadd66cf51faeb" ]
[ "algorithm/OCR/utils.py" ]
[ "import sys\nimport cv2\nimport os\nimport numpy as np\nimport torch\n\nfrom algorithm.debug import *\n\nsys.path.append(\".\")\n\n\ndef fillAndResize(image):\n \"\"\"\n 将输入图像填充为正方形且变换为(28,28)\n :param image:\n :return:\n \"\"\"\n h, w = image.shape\n l = max(w, h)\n ret = np.zeros((l, l), np.uint8)\n leftTop = np.array([l/2-w/2, l/2-h/2], np.uint8)\n ret[leftTop[1]:leftTop[1]+h, leftTop[0]:leftTop[0]+w] = image\n ret = cv2.resize(ret, (28, 28), interpolation=cv2.INTER_CUBIC)\n return ret\n\n\nclass newNet(object):\n def __init__(self):\n \"\"\"\n 初始化LeNet模型\n :return:\n \"\"\"\n sys.path.append(\"newNet\")\n from algorithm.OCR.newNet.LeNet import myNet\n\n self.net = myNet()\n self.net.eval()\n self.net.load_state_dict(torch.load(\"algorithm/OCR/newNet/net.pkl\"))\n\n def recognizeNet(self, image):\n \"\"\"\n LeNet识别图像中的数字\n :param image: 输入图像\n :return: 识别的数字值\n \"\"\"\n image = fillAndResize(image)\n tensor = torch.Tensor(image).view((1, 1, 28, 28))/255\n\n tensor = tensor.to(\"cpu\")\n result = self.net.forward(tensor)\n _, predicted = torch.max(result.data, 1)\n num = int(np.array(predicted[0]).astype(np.uint32))\n\n if not os.path.exists(\"storeDigitData\"):\n os.system(\"mkdir storeDigitData\")\n imgNum = len(os.listdir(\"storeDigitData/\"))\n cv2.imwrite(\"storeDigitData/\" + str(imgNum) + \"_\" + str(num) + \".bmp\", image)\n\n if ifShow:\n print(num)\n cv2.imshow(\"single\", image)\n cv2.waitKey(0)\n\n return str(num) if num != 10 else \"?\"\n" ]
[ [ "torch.max", "torch.Tensor", "torch.load", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
j96w/CoGail
[ "fe61a5af262cf30967adc070d7a364b832d1763d" ]
[ "configs/exp1_config.py" ]
[ "import argparse\nimport torch\n\ndef get_args():\n parser = argparse.ArgumentParser(description='CoGAIL')\n parser.add_argument(\n '--render-mode',\n default='headless',\n help='which visualization mode to use: headless or gui')\n parser.add_argument(\n '--algo',\n default='ppo',\n help='algorithm to use: a2c | ppo | acktr')\n parser.add_argument(\n '--gail',\n action='store_true',\n default=True,\n help='do imitation learning with gail')\n parser.add_argument(\n '--gail-experts-dir',\n default='dataset/dataset-continuous-info-act',\n help='directory that contains expert demonstrations for gail')\n parser.add_argument(\n '--gail-batch-size',\n type=int,\n default=128,\n help='gail batch size')\n parser.add_argument(\n '--gail-epoch',\n type=int,\n default=5,\n help='gail epochs')\n parser.add_argument(\n '--recode_dim',\n type=int,\n default=102,\n help='input feature dim of the code reconstruction model')\n parser.add_argument(\n '--code_size',\n type=int,\n default=2,\n help='size of the code')\n parser.add_argument(\n '--lr',\n type=float,\n default=3e-4,\n help='learning rate')\n parser.add_argument(\n '--eps',\n type=float,\n default=1e-5,\n help='RMSprop optimizer epsilon')\n parser.add_argument(\n '--alpha',\n type=float,\n default=0.99,\n help='RMSprop optimizer apha')\n parser.add_argument(\n '--gamma',\n type=float,\n default=0.99,\n help='discount factor for rewards')\n parser.add_argument(\n '--use-gae',\n action='store_true',\n default=True,\n help='use generalized advantage estimation')\n parser.add_argument(\n '--gae-lambda',\n type=float,\n default=0.95,\n help='gae lambda parameter')\n parser.add_argument(\n '--entropy-coef',\n type=float,\n default=0.0,\n help='entropy term coefficient')\n parser.add_argument(\n '--value-loss-coef',\n type=float,\n default=0.5,\n help='value loss coefficient')\n parser.add_argument(\n '--max-grad-norm',\n type=float,\n default=0.5,\n help='max norm of gradients')\n parser.add_argument(\n '--seed',\n type=int,\n 
default=1,\n help='random seed')\n parser.add_argument(\n '--cuda-deterministic',\n action='store_true',\n default=False,\n help=\"sets flags for determinism when using CUDA (potentially slow!)\")\n parser.add_argument(\n '--num-processes',\n type=int,\n default=1,\n help='how many training CPU processes to use')\n parser.add_argument(\n '--num-steps',\n type=int,\n default=6000,\n help='number of forward steps')\n parser.add_argument(\n '--ppo-epoch',\n type=int,\n default=10,\n help='number of ppo epochs')\n parser.add_argument(\n '--bc-pretrain-steps',\n type=int,\n default=30,\n help='number of bc pretrain steps')\n parser.add_argument(\n '--num-mini-batch',\n type=int,\n default=32,\n help='number of batches for ppo')\n parser.add_argument(\n '--clip-param',\n type=float,\n default=0.2,\n help='ppo clip parameter')\n parser.add_argument(\n '--log-interval',\n type=int,\n default=1,\n help='log interval, one log per n updates')\n parser.add_argument(\n '--save-interval',\n type=int,\n default=100,\n help='save interval, one save per n updates')\n parser.add_argument(\n '--eval-interval',\n type=int,\n default=30,\n help='eval interval, one eval per n updates')\n parser.add_argument(\n '--num-env-steps',\n type=int,\n default=6000000,\n help='number of environment steps to train')\n parser.add_argument(\n '--env-name',\n default='cogail_exp1_2dfq',\n help='environment to train on')\n parser.add_argument(\n '--log-dir',\n default='/tmp/gym/',\n help='directory to save agent logs (default: /tmp/gym)')\n parser.add_argument(\n '--save-dir',\n default='./trained_models/',\n help='directory to save agent logs')\n parser.add_argument(\n '--base-net-small',\n action='store_true',\n default=True,\n help='use smaller base net (works better on low dim controller)')\n parser.add_argument(\n '--use-cross-entropy',\n action='store_true',\n default=True,\n help='use cross entropy loss to update discriminator (works better on low dim controller)')\n parser.add_argument(\n 
'--use-curriculum',\n action='store_true',\n default=False,\n help='use curriculum step size')\n parser.add_argument(\n '--no-cuda',\n action='store_true',\n default=False,\n help='disables CUDA training')\n parser.add_argument(\n '--use-proper-time-limits',\n action='store_true',\n default=True,\n help='compute returns taking into account time limits')\n parser.add_argument(\n '--recurrent-policy',\n action='store_true',\n default=False,\n help='use a recurrent policy')\n parser.add_argument(\n '--use-linear-lr-decay',\n action='store_true',\n default=True,\n help='use a linear schedule on the learning rate')\n args = parser.parse_args()\n\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n\n assert args.algo in ['a2c', 'ppo', 'acktr']\n if args.recurrent_policy:\n assert args.algo in ['a2c', 'ppo'], \\\n 'Recurrent policy is not implemented for ACKTR'\n\n return args\n" ]
[ [ "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
justinbt1/Multimodal-Document-Classification
[ "794eb1e1235efc9c81f1edca881db576d754628a" ]
[ "experimental_models/utils/data.py" ]
[ "import os\nimport json\nimport PIL\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow import keras\n\nfrom utils.database import get_db_table\n\n\nclass DocumentData:\n \"\"\" Loads and prepares text and image data prior to modelling.\n\n Attributes:\n seed(int): Seed for random processes.\n n_classes(int): Number of classes present in dataset.\n vocab_length(int): Number of unique terms in vocabulary.\n text_train(np.array): Text training tokens.\n text_val(np.array): Text validation tokens.\n text_test(np.array): Text test tokens.\n image_train(np.array): Sequence of n training images.\n image_val(np.array): Sequence of n validation images.\n image_test(np.array): Sequence of n test images.\n y_train(np.array): Training class labels.\n y_val(np.array): Validation class labels.\n y_test(np.array): Test class labels.\n\n \"\"\"\n def __init__(self, label_map, seed, test_size=0.2, validation_size=0.2, drop_nans=False):\n \"\"\" TextData object constructor.\n\n Args:\n label_map(dict): label_map(dict): Mapping of labels to numbers.\n seed(int): Seed for random processes.\n test_size(float): Size of test hold out set.\n validation_size(float): Size of validation set.\n drop_nans(bool): Drop nan data?\n\n \"\"\"\n self.seed = seed\n self.n_classes = len(label_map)\n data_frame = get_db_table()\n data_frame.drop_duplicates(['image_dir_ref', 'text_json_ref'], inplace=True)\n\n if drop_nans:\n if drop_nans == 'all':\n data_frame = data_frame.loc[\n (data_frame['image_extracted'] == 1) & (data_frame['text_extracted'] == 1)\n ]\n elif drop_nans == 'text':\n data_frame = data_frame.loc[data_frame['text_extracted'] == 1]\n elif drop_nans == 'image':\n data_frame = data_frame.loc[data_frame['image_extracted'] == 1]\n elif drop_nans == 'or':\n data_frame = data_frame.loc[\n (data_frame['image_extracted'] == 1) | (data_frame['text_extracted'] == 1)\n ]\n else:\n raise ValueError(\n 'drop_nans parameter is invalid 
please use False, \"all\", \"text\", \"image\" or \"Or\"'\n )\n\n labels = np.array([label_map[label] for label in data_frame['label']])\n\n x_train, x_test, y_train, y_test = train_test_split(\n data_frame.index,\n labels,\n test_size=test_size,\n train_size=1.0 - test_size,\n random_state=seed,\n shuffle=True,\n stratify=labels\n )\n\n x_train, x_val, y_train, y_val = train_test_split(\n x_train,\n y_train,\n test_size=validation_size,\n train_size=1.0 - validation_size,\n random_state=seed,\n shuffle=True,\n stratify=y_train\n )\n\n self._x_train = data_frame.loc[x_train]\n self._x_val = data_frame.loc[x_val]\n self._x_test = data_frame.loc[x_test]\n\n self.vocab_length = 0\n\n self.text_train = None\n self.text_val = None\n self.text_test = None\n\n self.image_train = None\n self.image_val = None\n self.image_test = None\n\n self.y_train = keras.utils.to_categorical(y_train, num_classes=self.n_classes)\n self.y_val = keras.utils.to_categorical(y_val, num_classes=self.n_classes)\n self.y_test = keras.utils.to_categorical(y_test, num_classes=self.n_classes)\n\n self.data_frame = data_frame\n\n def load_text_data(self, text_length=2000):\n \"\"\" Loads text and processes sequences for modelling.\n\n Args:\n text_length(int): Maximum number of words in sequence.\n\n \"\"\"\n train_text = self._load_processed_text(self._x_train['text_json_ref'])\n val_text = self._load_processed_text(self._x_val['text_json_ref'])\n test_text = self._load_processed_text(self._x_test['text_json_ref'])\n\n tokenizer = keras.preprocessing.text.Tokenizer(oov_token=1, split=' ')\n tokenizer.fit_on_texts(train_text)\n self.vocab_length = len(tokenizer.word_index) + 1\n\n train_text = tokenizer.texts_to_sequences(train_text)\n val_text = tokenizer.texts_to_sequences(val_text)\n test_text = tokenizer.texts_to_sequences(test_text)\n\n self.text_train = keras.preprocessing.sequence.pad_sequences(\n train_text,\n maxlen=text_length,\n padding='post',\n truncating='post',\n value=0\n )\n\n 
self.text_val = keras.preprocessing.sequence.pad_sequences(\n val_text,\n maxlen=text_length,\n padding='post',\n truncating='post',\n value=0\n )\n\n self.text_test = keras.preprocessing.sequence.pad_sequences(\n test_text,\n maxlen=text_length,\n padding='post',\n truncating='post',\n value=0\n )\n\n @staticmethod\n def _load_processed_text(db_series):\n \"\"\" Loads processed text from JSON output files.\n\n Args:\n db_series(pd.Series): Data series containing JSON file paths.\n\n Returns:\n list: List of extracted text strings.\n\n \"\"\"\n texts = []\n\n for text_path in db_series:\n text = ''\n\n if os.path.exists(text_path):\n text_file = open(text_path, 'rt')\n text_json = json.load(text_file)\n text_file.close()\n\n text = text_json['Clean Content']\n\n if text:\n text = ' '.join(text.split()[0:2000])\n\n texts.append(text)\n\n return texts\n\n def load_image_data(self, image_size=200, n_pages=10, sequential=False):\n \"\"\" Returns page image sequences for all documents.\n\n Args:\n image_size(int): Image size, assumes image is a square.\n n_pages(int): Number of page images in sequence.\n sequential(bool): Return all pages for all documents as a single sequence.\n\n Returns:\n tuple: page image sequences for all documents, labels.\n\n \"\"\"\n if image_size > 500:\n raise ValueError('Image size cannot exceed 500!')\n\n self.image_train = self._load_document_images(\n self._x_train['image_dir_ref'], image_size, n_pages, sequential\n )\n\n self.image_val = self._load_document_images(\n self._x_val['image_dir_ref'], image_size, n_pages, sequential\n )\n\n self.image_test = self._load_document_images(\n self._x_test['image_dir_ref'], image_size, n_pages, sequential\n )\n\n @staticmethod\n def _data_paths(sample_directories, n_pages):\n \"\"\" Get all image sequence paths.\n\n Args:\n sample_directories(np.array): Document image paths.\n n_pages(int): Number of page images in sequence.\n\n Returns:\n dict: Document, image path pairs.\n\n \"\"\"\n 
document_paths = {}\n for directory in sample_directories:\n if not os.path.exists(directory):\n document_paths[directory] = []\n continue\n\n page_images = os.listdir(directory)[0:n_pages]\n\n if not page_images:\n document_paths[directory] = []\n continue\n\n document_paths[directory] = page_images\n return document_paths\n\n def _load_document_images(self, directory_paths, image_size, n_pages, sequential):\n \"\"\" Loads document images from document image directories.\n\n Args:\n directory_paths(np.array): List of directories containing page images.\n image_size(int): Image size, assumes image is a square.\n n_pages(int): Number of page images in sequence.\n sequential(bool): Return all pages for all documents as a single sequence.\n\n Returns:\n np.array: 2D or 3D array containing the page images of each document.\n\n \"\"\"\n document_paths = self._data_paths(directory_paths, n_pages)\n\n x = []\n\n for document_path in document_paths:\n image_paths = document_paths[document_path]\n\n image_sequence = self._process_sequences(\n document_path, image_paths, image_size, n_pages\n )\n\n if n_pages == 1:\n image_sequence = image_sequence[0]\n\n x.append(image_sequence)\n\n x = np.array(x)\n x = x / 255\n\n if n_pages == 1:\n x = x.reshape((x.shape[0], image_size, image_size, 1))\n elif sequential:\n x = x.reshape((x.shape[0] * n_pages, image_size, image_size, 1))\n else:\n x = x.reshape((x.shape[0], n_pages, image_size, image_size, 1))\n\n return x\n\n @staticmethod\n def _process_sequences(dir_path, image_paths, image_size, n_pages):\n \"\"\" Loads sequence of page images as array.\n\n Args:\n dir_path(str): Document image directory path.\n image_paths(list): Paths to page images.\n image_size(int): Image size, assumes image is a square.\n n_pages(int): Number of page images in sequence.\n\n Returns:\n np.array: Array of image sequences.\n\n \"\"\"\n image_sequence = []\n for page_image in image_paths:\n image_path = os.path.join(dir_path, page_image)\n image = 
PIL.Image.open(image_path).convert('L')\n image = image.resize((image_size, image_size))\n image_sequence.append(np.array(image))\n\n sequence_length = len(image_sequence)\n\n if sequence_length < n_pages:\n n_blank_images = n_pages - sequence_length\n image_sequence += [np.zeros((image_size, image_size))] * n_blank_images\n\n image_sequence = np.array(image_sequence)\n\n return image_sequence\n" ]
[ [ "tensorflow.keras.preprocessing.text.Tokenizer", "sklearn.model_selection.train_test_split", "numpy.array", "numpy.zeros", "tensorflow.keras.utils.to_categorical", "tensorflow.keras.preprocessing.sequence.pad_sequences" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
mlvc-lab/VDSR_pytorch
[ "37b403b61006ac604a6ef44485aa248d25a17200" ]
[ "main.py" ]
[ "from __future__ import print_function\nimport argparse\nfrom math import log10, sqrt\nimport time\nimport os\nfrom os import errno\nfrom os.path import join\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom data import get_training_set, get_validation_set, get_test_set\nfrom model import VDSR\n\n\nparser = argparse.ArgumentParser(description='PyTorch VDSR')\nparser.add_argument('--dataset', type=str, default='BSDS300',\n required=True, help=\"dataset directory name\")\nparser.add_argument('--crop_size', type=int, default=256,\n required=True, help=\"network input size\")\nparser.add_argument('--upscale_factor', type=int, default=2,\n required=True, help=\"super resolution upscale factor\")\nparser.add_argument('--batch_size', type=int, default=128,\n help=\"training batch size\")\nparser.add_argument('--test_batch_size', type=int,\n default=32, help=\"testing batch size\")\nparser.add_argument('--epochs', type=int, default=100,\n help='number of epochs to train for')\nparser.add_argument('--lr', type=float, default=0.001,\n help='Learning Rate. Default=0.001')\nparser.add_argument(\"--step\", type=int, default=10,\n help=\"Sets the learning rate to the initial LR decayed by momentum every n epochs, Default: n=10\")\nparser.add_argument(\"--clip\", type=float, default=0.4,\n help=\"Clipping Gradients. 
Default=0.4\")\nparser.add_argument(\"--weight-decay\", \"--wd\", default=1e-4,\n type=float, help=\"Weight decay, Default: 1e-4\")\nparser.add_argument('--cuda', action='store_true', help='use cuda?')\nparser.add_argument('--threads', type=int, default=16,\n help='number of threads for data loader to use')\nparser.add_argument('--gpuids', default=[0], nargs='+',\n help='GPU ID for using')\nparser.add_argument('--add_noise', action='store_true',\n help='add gaussian noise?')\nparser.add_argument('--noise_std', type=float, default=3.0,\n help='standard deviation of gaussian noise')\nparser.add_argument('--test', action='store_true', help='test mode')\nparser.add_argument('--model', default='', type=str, metavar='PATH',\n help='path to test or resume model')\n\n\ndef main():\n global opt\n opt = parser.parse_args()\n opt.gpuids = list(map(int, opt.gpuids))\n\n print(opt)\n\n if opt.cuda and not torch.cuda.is_available():\n raise Exception(\"No GPU found, please run without --cuda\")\n\n cudnn.benchmark = True\n\n if not opt.test:\n train_set = get_training_set(opt.dataset, opt.crop_size,\n opt.upscale_factor, opt.add_noise, opt.noise_std)\n validation_set = get_validation_set(\n opt.dataset, opt.crop_size, opt.upscale_factor)\n\n test_set = get_test_set(\n opt.dataset, opt.crop_size, opt.upscale_factor)\n\n if not opt.test:\n training_data_loader = DataLoader(\n dataset=train_set, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True)\n validating_data_loader = DataLoader(\n dataset=validation_set, num_workers=opt.threads, batch_size=opt.test_batch_size, shuffle=False)\n\n testing_data_loader = DataLoader(\n dataset=test_set, num_workers=opt.threads, batch_size=opt.test_batch_size, shuffle=False)\n\n model = VDSR()\n criterion = nn.MSELoss()\n\n if opt.cuda:\n torch.cuda.set_device(opt.gpuids[0])\n with torch.cuda.device(opt.gpuids[0]):\n model = model.cuda()\n criterion = criterion.cuda()\n model = nn.DataParallel(model, device_ids=opt.gpuids,\n 
output_device=opt.gpuids[0])\n\n optimizer = optim.Adam(model.parameters(), lr=opt.lr,\n weight_decay=opt.weight_decay)\n\n if opt.test:\n model_name = join(\"model\", opt.model)\n model = torch.load(model_name)\n start_time = time.time()\n test(model, criterion, testing_data_loader)\n elapsed_time = time.time() - start_time\n print(\"===> average {:.2f} image/sec for test\".format(\n 100.0/elapsed_time))\n return\n\n train_time = 0.0\n validate_time = 0.0\n for epoch in range(1, opt.epochs + 1):\n start_time = time.time()\n train(model, criterion, epoch, optimizer, training_data_loader)\n elapsed_time = time.time() - start_time\n train_time += elapsed_time\n print(\"===> {:.2f} seconds to train this epoch\".format(\n elapsed_time))\n start_time = time.time()\n validate(model, criterion, validating_data_loader)\n elapsed_time = time.time() - start_time\n validate_time += elapsed_time\n print(\"===> {:.2f} seconds to validate this epoch\".format(\n elapsed_time))\n if epoch % 10 == 0:\n checkpoint(model, epoch)\n\n print(\"===> average training time per epoch: {:.2f} seconds\".format(train_time/opt.epochs))\n print(\"===> average validation time per epoch: {:.2f} seconds\".format(validate_time/opt.epochs))\n print(\"===> training time: {:.2f} seconds\".format(train_time))\n print(\"===> validation time: {:.2f} seconds\".format(validate_time))\n print(\"===> total training time: {:.2f} seconds\".format(train_time+validate_time))\n\n\ndef adjust_learning_rate(epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 10 epochs\"\"\"\n lr = opt.lr * (0.1 ** (epoch // opt.step))\n return lr\n\n\ndef train(model, criterion, epoch, optimizer, training_data_loader):\n lr = adjust_learning_rate(epoch-1)\n\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n print(\"Epoch = {}, lr = {}\".format(epoch, optimizer.param_groups[0][\"lr\"]))\n\n epoch_loss = 0\n for iteration, batch in enumerate(training_data_loader, 1):\n input, target = 
Variable(batch[0]), Variable(\n batch[1], requires_grad=False)\n if opt.cuda:\n input = input.cuda()\n target = target.cuda()\n\n optimizer.zero_grad()\n model_out = model(input)\n loss = criterion(model_out, target)\n epoch_loss += loss.item()\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), opt.clip/lr)\n optimizer.step()\n\n print(\"===> Epoch[{}]({}/{}): Loss: {:.4f}\".format(\n epoch, iteration, len(training_data_loader), loss.item()))\n\n print(\"===> Epoch {} Complete: Avg. Loss: {:.4f}\".format(\n epoch, epoch_loss / len(training_data_loader)))\n\n\ndef validate(model, criterion, validating_data_loader):\n avg_psnr = 0\n for batch in validating_data_loader:\n input, target = Variable(batch[0]), Variable(batch[1])\n if opt.cuda:\n input = input.cuda()\n target = target.cuda()\n\n prediction = model(input)\n mse = criterion(prediction, target)\n psnr = 10 * log10(1.0 / mse.item())\n avg_psnr += psnr\n print(\"===> Avg. PSNR: {:.4f} dB\".format(\n avg_psnr / len(validating_data_loader)))\n\n\ndef test(model, criterion, testing_data_loader):\n avg_psnr = 0\n for batch in testing_data_loader:\n input, target = Variable(batch[0]), Variable(batch[1])\n if opt.cuda:\n input = input.cuda()\n target = target.cuda()\n\n prediction = model(input)\n mse = criterion(prediction, target)\n psnr = 10 * log10(1.0 / mse.item())\n avg_psnr += psnr\n print(\"===> Avg. 
PSNR: {:.4f} dB\".format(\n avg_psnr / len(testing_data_loader)))\n\n\ndef checkpoint(model, epoch):\n try:\n if not(os.path.isdir('model')):\n os.makedirs(os.path.join('model'))\n except OSError as e:\n if e.errno != errno.EEXIST:\n print(\"Failed to create directory!!!!!\")\n raise\n\n model_out_path = \"model/model_epoch_{}.pth\".format(epoch)\n torch.save(model, model_out_path)\n print(\"Checkpoint saved to {}\".format(model_out_path))\n\n\nif __name__ == '__main__':\n start_time = time.time()\n main()\n elapsed_time = time.time() - start_time\n print(\"===> total time: {:.2f} seconds\".format(elapsed_time))\n" ]
[ [ "torch.cuda.set_device", "torch.load", "torch.utils.data.DataLoader", "torch.autograd.Variable", "torch.cuda.is_available", "torch.cuda.device", "torch.nn.DataParallel", "torch.nn.MSELoss", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anonymous202201/fast_transferable_blackbox_attack
[ "765294e195b32766e11cd71f89500fdc5e44dcdc" ]
[ "fta/models/surrogates/mobilenetv2.py" ]
[ "\"\"\" Adapted mobilenet_v2 pytorch model that is used as surrogate. \"\"\"\r\nimport torch\r\nimport torchvision\r\n\r\nfrom fta.models.surrogates.base import BaseSurrogate\r\nfrom fta.utils.dataset_utils.imagenet_utils import get_imagenet_normalize\r\nfrom fta.utils.torch_utils.model_utils import Normalize\r\n\r\nimport pdb\r\n\r\n\r\nclass TruncatedMobilenetV2PyTorch(torch.nn.Module):\r\n \"\"\" Truncated mobilenet_v2 model. The model is cut in middle and output\r\n intermediate feature maps.\r\n \"\"\"\r\n def __init__(self, layer_idx: int, is_normalize: bool,\r\n custimized_pretrain: str=\"\"):\r\n super(TruncatedMobilenetV2PyTorch, self).__init__()\r\n self._layer_idx = layer_idx\r\n self._is_normalize = is_normalize\r\n if self._is_normalize:\r\n img_mean, img_std = get_imagenet_normalize()\r\n self._normalize = Normalize(img_mean, img_std)\r\n base_model = torchvision.models.mobilenet_v2(\r\n pretrained=True).cuda().eval()\r\n if custimized_pretrain is not None:\r\n print(\"========== Loading custimized weights ==========\")\r\n weight_dict = torch.load(custimized_pretrain)\r\n if isinstance(weight_dict, dict) and \"state_dict\" in weight_dict.keys():\r\n weight_dict = weight_dict[\"state_dict\"]\r\n base_model.load_state_dict(weight_dict)\r\n features = list(base_model.features)[:self._layer_idx]\r\n self._features = torch.nn.ModuleList(features).cuda().eval()\r\n\r\n def forward(self, input_t):\r\n if self._is_normalize:\r\n x = self._normalize(input_t)\r\n else:\r\n x = input_t\r\n \r\n for curt_layer in self._features:\r\n x = curt_layer(x)\r\n return x\r\n\r\n\r\nclass TruncatedMobilenet_V2(BaseSurrogate):\r\n \"\"\" Truncated mobilenet_v2 model wrapped in surrogate class.\r\n \"\"\"\r\n def __init__(self, layer_idx: int=7, is_normalize: bool=True,\r\n custimized_pretrain: str=\"\"):\r\n \"\"\" \r\n mobile net classification best layer_idx: 7\r\n mobile net detection best layer_idx: 13\r\n \"\"\"\r\n print(\"Layer idx: \", layer_idx)\r\n 
self._mobilenetv2_truncated = TruncatedMobilenetV2PyTorch(\r\n layer_idx=layer_idx, is_normalize=is_normalize,\r\n custimized_pretrain=custimized_pretrain)\r\n super(TruncatedMobilenet_V2, self).__init__(\r\n \"mobilenetv2truncated\", surrogate_model=self._mobilenetv2_truncated)\r\n\r\n def _predict(self, input):\r\n return self._predict_batch(torch.unsqueeze(input, 0))\r\n\r\n def _predict_batch(self, input_batch):\r\n ret = {}\r\n feature = self._mobilenetv2_truncated(input_batch)\r\n ret[\"intermediate_feature\"] = feature\r\n return ret\r\n\r\n\r\nMOBILENETV2T = TruncatedMobilenet_V2\r\n\r\n\r\nclass Mobilenet_V2(BaseSurrogate):\r\n \"\"\" mobilenet_v2 model wrapped in surrogate class.\r\n \"\"\"\r\n def __init__(self, is_normalize: bool=True, **kwargs):\r\n self._is_normalize = is_normalize\r\n if self._is_normalize:\r\n img_mean, img_std = get_imagenet_normalize()\r\n self._normalize = Normalize(img_mean, img_std)\r\n self._mobilenetv2 = torchvision.models.mobilenet_v2(\r\n pretrained=True).cuda().eval()\r\n super(Mobilenet_V2, self).__init__(\r\n \"mobilenetv2\", surrogate_model=self._mobilenetv2)\r\n\r\n def _predict(self, input):\r\n return self._predict_batch(torch.unsqueeze(input, 0))\r\n\r\n def _predict_batch(self, input_batch):\r\n ret = {}\r\n logits = self._mobilenetv2(input_batch)\r\n ret[\"logits\"] = logits\r\n return ret\r\n\r\n\r\nMOBILENETV2 = Mobilenet_V2\r\n" ]
[ [ "torch.nn.ModuleList", "torch.unsqueeze", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kencan7749/bdpy
[ "75b909742aa4767f09823cc98a588c41848292a9" ]
[ "bdpy/bdata/bdata.py" ]
[ "'''BrainDecoderToolbox2/BdPy data class\n\nThis file is a part of BdPy\n\n\nAPI list\n--------\n\n- Data modification\n - add\n - update\n - add_metadata\n - rename_metadata\n - set_metadatadescription\n- Data access\n - select\n - get\n - get_metadata\n - show_metadata\n- File I/O\n - load\n - save\n'''\n\n\n__all__ = ['BData']\n\n\nimport os\nimport sys\nimport warnings\nimport time\nimport datetime\nimport inspect\nimport re\n\nimport h5py\nimport numpy as np\nimport scipy.io as sio\n\nfrom .metadata import MetaData\nfrom .featureselector import FeatureSelector\n\n\n# BData class ##########################################################\n\nclass BData(object):\n '''BrainDecoderToolbox2/BdPy data class\n\n The instance of class `BData` contains `dataset` and `metadata` as instance\n variables.\n\n Parameters\n ----------\n file_name : str, optional\n File which contains BData (default: None)\n file_type : {'Matlab', 'HDF5', 'None'}, optional\n File type (default: None)\n\n If `file_name` was not given, BData.__init__() creates an empty\n dataset and metadata.\n\n Attributes\n ----------\n dataset : numpy array (dtype=float)\n Dataset array\n metadata : metadata object\n Meta-data object\n '''\n\n\n def __init__(self, file_name=None, file_type=None):\n self.__dataset = np.ndarray((0, 0), dtype=float)\n self.__metadata = MetaData()\n self.__header = {}\n self.__vmap = {}\n\n if file_name is not None:\n self.load(file_name, file_type)\n\n # Properties -------------------------------------------------------\n\n # dataset\n @property\n def dataset(self):\n return self.__dataset\n\n @dataset.setter\n def dataset(self, value):\n self.__dataset = value\n\n # metadata\n @property\n def metadata(self):\n return self.__metadata\n\n @metadata.setter\n def metadata(self, value):\n self.__metadata = value\n\n # header\n @property\n def header(self):\n return self.__header\n\n # dataSet\n @property\n def dataSet(self):\n return self.__dataset\n\n @dataSet.setter\n def 
dataSet(self, value):\n self.__dataset = value\n\n # metaData\n @property\n def metaData(self):\n return self.__metadata\n\n @metaData.setter\n def metaData(self, value):\n self.__metadata = value\n\n # Misc -------------------------------------------------------------\n\n def __obsoleted_method(alternative):\n '''Decorator for obsoleted functions'''\n def __obsoleted_method_in(func):\n import functools\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n funcname = func.__name__\n warnings.warn(\"'%s' is obsoleted and kept for compatibility. Use '%s' instead.\" % (funcname, alternative),\n UserWarning, stacklevel=2)\n return func(*args, **kwargs)\n return wrapper\n return __obsoleted_method_in\n\n\n # Data modification ------------------------------------------------\n\n def add(self, x, name):\n '''Add `x` to dataset as `name`.`\n\n Parameters\n ----------\n x : array\n Data matrix to be added in dataset\n name : str\n Name of the data `x`\n\n Returns\n -------\n None\n '''\n\n if x.ndim == 1:\n x = x[:, np.newaxis]\n\n colnum_has = self.dataset.shape[1] # Num of existing columns in 'dataset'\n colnum_add = x.shape[1] # Num of columns to be added\n\n # Add 'x' to dataset\n if not self.dataset.size:\n self.dataset = x\n else:\n # TODO: Add size check of 'x' and 'self.dataset'\n self.dataset = np.hstack((self.dataset, x))\n\n # Add new metadata\n column_description = '1 = %s' % name\n column_value = [np.nan for _ in range(colnum_has)] + [1 for _ in range(colnum_add)]\n\n self.metadata.set(name, column_value, column_description,\n lambda x, y: np.hstack((y[:colnum_has], x[-colnum_add:])))\n\n\n @__obsoleted_method('add')\n def add_dataset(self, x, attribute_key):\n '''Add `x` to dataset with attribute meta-data key `attribute_key`\n\n Parameters\n ----------\n x : array\n Data matrix to be added in dataset\n attribute_key : str\n Key of attribute meta-data, which specifies the columns containing `x`\n\n Returns\n -------\n None\n '''\n return self.add(x, 
attribute_key)\n\n\n def update(self, key, dat):\n '''Update dataset\n\n Parameters\n ----------\n key : str\n Name of columns to be updated\n dat : array_like\n New data array\n\n Returns\n -------\n None\n '''\n mdind = [a == 1 for a in self.get_metadata(key)]\n self.dataset[:, np.array(mdind)] = dat\n\n\n def add_metadata(self, key, value, description='', where=None, attribute=None):\n '''Add meta-data with `key`, `description`, and `value` to metadata\n\n Parameters\n ----------\n key : str\n Meta-data key\n value : array\n Meta-data array\n description : str, optional\n Meta-data description\n where : str, optional\n Meta-data key masking the columns in the dataset\n\n Returns\n -------\n None\n '''\n\n # TODO: Add attribute specifying\n # TODO: Add size check of metadata/value\n\n if attribute is not None:\n warnings.warn(\"Keyword argument 'attribute' is obsoleted and kept for compatibility. Use 'where' instead.\",\n UserWarning, stacklevel=2)\n if where is not None:\n warnings.warn(\"Value of 'attribute' is overridden by 'where'.\",\n UserWarning, stacklevel=2)\n else:\n where = attribute\n\n if where is not None:\n attr_ind = self.metadata.get(where, 'value') == 1\n add_value = np.array([np.nan for _ in range(self.metadata.get_value_len())])\n add_value[attr_ind] = value\n else:\n add_value = value\n\n self.metadata.set(key, add_value, description)\n\n\n def merge_metadata(self, key, sources, description='', where=None, method='logical_or'):\n '''Merage metadata rows.'''\n\n if not method == 'logical_or':\n raise NotImplementedError('Only `logical_or` is implemented')\n\n if where is None:\n raise ValueError('You need to specify `where`.')\n\n mdv_lst = [self.get_metadata(s, where=where) for s in sources]\n mdv_new = np.nansum(np.vstack(mdv_lst), axis=0)\n mdv_new[mdv_new > 1] = 1\n\n self.add_metadata(key, mdv_new, description, where=where)\n\n\n def rename_metadata(self, key_old, key_new):\n '''Rename meta-data key\n\n Parameters\n ----------\n key_old, 
key_new : str\n Old and new meta-data keys\n\n Returns\n -------\n None\n '''\n self.metadata.key[self.metadata.key.index(key_old)] = key_new\n return None\n\n\n def set_metadatadescription(self, key, description):\n '''Set description of metadata specified by `key`\n\n Parameters\n ----------\n key : str\n Meta-data key\n description : str\n Meta-data description\n\n Returns\n -------\n None\n '''\n\n self.metadata.set(key, None, description,\n lambda x, y: y)\n\n\n @__obsoleted_method('set_metadatadescription')\n def edit_metadatadescription(self, metakey, description):\n '''Set description of metadata specified by `key`\n\n Parameters\n ----------\n key : str\n Meta-data key\n description : str\n Meta-data description\n\n Returns\n -------\n None\n '''\n self.set_metadatadescription(metakey, description)\n\n def update_header(self, header):\n '''Update header.'''\n self.__header.update(header)\n\n def applyfunc(self, func, where=None, **kargs):\n '''Apply `func` to the dataset.'''\n\n if where is None:\n # FIXME\n fout = func(self.dataset, **kargs)\n\n if isinstance(fout, tuple):\n self.dataset = fout[0]\n else:\n self.dataset = fout\n else:\n # FIXME\n if not isinstance(where, list):\n where = [where]\n\n data_selector = '|'.join([w + ' = 1' for w in where])\n\n x, x_ind = self.select(data_selector, return_index=True)\n\n fout = func(x, **kargs)\n\n if isinstance(fout, tuple):\n # Index mapping\n ind_map = fout[1]\n\n ds = np.zeros((len(ind_map), self.dataset.shape[1]))\n\n index = np.zeros(self.dataset.shape[1], dtype=bool)\n index[x_ind] = True\n\n #import pdb; pdb.set_trace()\n\n ds[:, index] = fout[0]\n ds[:, ~index] = self.dataset[np.ix_(ind_map, ~index)]\n\n self.dataset = ds\n else:\n # No index mapping\n self.dataset[:, x_ind] = fout\n\n return self\n\n # Data access ------------------------------------------------------\n\n def select(self, condition, return_index=False, verbose=True):\n '''Select data (columns) from dataset.\n\n Parameters\n 
----------\n condition : str\n Condition specifying columns.\n retrun_index : bool, optional\n If True, return index of selected columns (default: False).\n verbose : bool, optional\n If True, display verbose messages (default: True).\n\n Returns\n -------\n array-like\n Selected data\n list, optional\n Selected index\n\n Note\n ----\n The following operators are acceptable in `condition`.\n\n - | (or)\n - & (and)\n - = (equal)\n - @ (conditional)\n '''\n\n expr_rpn = FeatureSelector(condition).rpn\n\n stack = []\n buf_sel = []\n\n for i in expr_rpn:\n if i == '=':\n r = stack.pop()\n l = stack.pop()\n\n stack.append(np.array([n == r for n in l], dtype=bool))\n\n elif i == 'top':\n # Dirty solution\n\n # Need fix on handling 'None'\n\n n = int(stack.pop()) # Num of elements to be selected\n v = stack.pop()\n\n order = self.__get_order(v)\n\n stack.append(order)\n buf_sel.append(n)\n\n elif i in ['|', '&', '-']:\n r = stack.pop()\n l = stack.pop()\n\n if r.dtype != 'bool':\n # 'r' should be an order vector\n num_sel = buf_sel.pop()\n r = self.__get_top_elm_from_order(r, num_sel)\n #r = np.array([ n < num_sel for n in r ], dtype = bool)\n\n if l.dtype != 'bool':\n # 'l' should be an order vector\n num_sel = buf_sel.pop()\n l = self.__get_top_elm_from_order(l, num_sel)\n #l = np.array([ n < num_sel for n in l ], dtype = bool)\n\n if i == '|':\n result = np.logical_or(l, r)\n elif i == '&':\n result = np.logical_and(l, r)\n elif i == '-':\n result = np.logical_and(l, np.logical_not(r))\n\n stack.append(result)\n\n elif i == '@':\n # FIXME\n # In the current version, the right term of '@' is assumed to\n # be a boolean, and the left is to be an order vector.\n\n r = stack.pop() # Boolean\n l = stack.pop() # Float\n\n l[~r] = np.inf\n\n selind = self.__get_top_elm_from_order(l, buf_sel.pop())\n\n stack.append(np.array(selind))\n\n else:\n if isinstance(i, str):\n if i.isdigit():\n # 'i' should be a criteria value\n i = float(i)\n else:\n # 'i' should be a meta-data key\n 
i = self.__metadata_key_to_bool_vector(i)\n\n stack.append(i)\n\n selected_index = stack.pop()\n\n # If buf_sel still has an element, `select_index` should be an order vector.\n # Select N elements based on the order vector.\n if buf_sel:\n num_sel = buf_sel.pop()\n selected_index = [n < num_sel for n in selected_index]\n\n # Very dirty solution\n selected_index = np.array(selected_index) == True\n\n if return_index:\n return self.dataset[:, np.array(selected_index)], selected_index\n else:\n return self.dataset[:, np.array(selected_index)]\n\n\n @__obsoleted_method('select')\n def select_dataset(self, condition, return_index=False, verbose=True):\n '''Select data (columns) from dataset.\n\n Parameters\n ----------\n condition : str\n Condition specifying columns.\n retrun_index : bool, optional\n If True, return index of selected columns (default: False).\n verbose : bool, optional\n If True, display verbose messages (default: True).\n\n Returns\n -------\n array-like\n Selected data\n list, optional\n Selected index\n\n Note\n ----\n The following operators are acceptable in `condition`.\n\n - | (or)\n - & (and)\n - = (equal)\n - @ (conditional)\n '''\n return self.select(condition, return_index, verbose)\n\n\n @__obsoleted_method('select')\n def select_feature(self, condition, return_index=False, verbose=True):\n '''Select data (columns) from dataset.\n\n Parameters\n ----------\n condition : str\n Condition specifying columns.\n retrun_index : bool, optional\n If True, return index of selected columns (default: False).\n verbose : bool, optional\n If True, display verbose messages (default: True).\n\n Returns\n -------\n array-like\n Selected data\n list, optional\n Selected index\n\n Note\n ----\n The following operators are acceptable in `condition`.\n\n - | (or)\n - & (and)\n - = (equal)\n - @ (conditional)\n '''\n return self.select(condition, return_index, verbose)\n\n\n def get(self, key=None):\n '''Get dataset\n\n When `key` is not given, `get_dataset` 
returns `dataset`. When `key` is\n given, `get_dataset` returns data specified by `key`\n '''\n\n if key is None:\n return self.dataset\n else:\n query = '%s = 1' % key\n return self.select(query, return_index=False, verbose=False)\n\n\n @__obsoleted_method('get')\n def get_dataset(self, key=None):\n '''Get dataset\n\n When `key` is not given, `get_dataset` returns `dataset`. When `key` is\n given, `get_dataset` returns data specified by `key`\n '''\n return self.get(key)\n\n\n def get_metadata(self, key, where=None):\n '''Get value of meta-data specified by `key`\n\n Parameters\n ----------\n key : str\n Meta-data key.\n\n where : str, optional\n Columns which mask meta-data array.\n\n Returns\n -------\n array-like\n '''\n\n md = self.metadata.get(key, 'value')\n\n if where != None:\n # Mask the metadata array with columns specified with `where`\n ind = self.metadata.get(where, 'value') == True\n md = md[ind]\n\n return md\n\n\n def show_metadata(self):\n '''Show all the key and description in metadata'''\n\n # Get max length\n max_key = max([len(k) for k in self.metadata.key])\n max_desc = max([len(k) for k in self.metadata.description])\n\n # Disp header\n print('| ' + 'Key' + ' ' * (max_key - 3) + ' | ' + 'Description' + ' ' * (max_desc - 11) + ' |')\n print('|-' + '-' * max_key + '-|-' + '-' * max_desc + '-|')\n\n # Disp key and description\n for k, d in zip(self.metadata.key, self.metadata. 
description):\n print('| ' + k + ' ' * (max_key - len(k)) + ' | ' + d + ' ' * (max_desc - len(d)) + ' |')\n\n # Value-label map --------------------------------------------------------\n\n def get_labels(self, key):\n '''Get `key` as labels.'''\n if not key in self.__vmap:\n raise ValueError('Key not found in vmap: %s' % key)\n value = self.select(key).flatten()\n label = []\n for x in value:\n if np.isnan(x):\n v = 'n/a'\n else:\n v = self.__vmap[key][x]\n label.append(v)\n return label\n\n def get_label(self, key):\n '''Get `key` as labels.'''\n return self.get_labels(key)\n\n def get_vmap(self, key):\n '''Returns vmap of `key`.'''\n if key in self.__vmap:\n return self.__vmap[key]\n else:\n raise ValueError('%s not found in vmap' % key)\n\n def get_vmap_keys(self):\n return self.__vmap.keys()\n\n def add_vmap(self, key, vmap):\n '''Add vmap.'''\n if not key in self.__metadata.key:\n raise ValueError('%s not found in metadata.' % key)\n\n if type(vmap) is not dict:\n raise TypeError('`vmap` should be a dictionary.')\n for k in vmap.keys():\n if type(k) is str:\n raise TypeError('Keys of `vmap` should be numerical.')\n\n if key in self.__vmap:\n # Check vmap consistency\n if self.__check_vmap_consistency(vmap, self.__vmap[key]):\n vmap.update(self.__vmap[key])\n vmap_add = self.__get_act_vmap(key, vmap)\n self.__vmap[key].update(vmap_add)\n else:\n raise ValueError('Invalid vmap: labels are inconsistent between old and new vmaps.')\n else:\n vmap_add = self.__get_act_vmap(key, vmap)\n self.__vmap.update({key: vmap_add})\n\n return None\n\n def __get_act_vmap(self, key, vmap):\n values = np.unique(self.get(key))\n try:\n vmap_add = {}\n for val in values:\n if np.isnan(val):\n continue\n vmap_add.update({val: vmap[val]})\n except KeyError:\n raise ValueError('Invalid vmap: label for %f not found.' 
% val)\n return vmap_add\n\n def __check_vmap_consistency(self, vmap_new, vmap_old):\n for key in vmap_new.keys():\n if not key in vmap_old:\n continue\n if vmap_old[key] != vmap_new[key]:\n print('Inconsistent label:')\n print(' Key: %f' % key)\n print(' Old label: %s' % vmap_old[key])\n print(' New label: %s' % vmap_new[key])\n return False\n return True\n\n # File I/O---------------------------------------------------------\n\n def load(self, load_filename, load_type=None):\n '''Load 'dataset' and 'metadata' from a given file'''\n\n if load_type is None:\n load_type = self.__get_filetype(load_filename)\n\n if load_type == \"Matlab\":\n self.__load_mat(load_filename)\n elif load_type == \"HDF5\":\n self.__load_h5(load_filename)\n else:\n raise ValueError(\"Unknown file type: %s\" % (load_type))\n\n\n def save(self, file_name, file_type=None):\n '''Save 'dataset' and 'metadata' to a file'''\n\n # Store data creation information\n t_now = time.time()\n t_now_str = datetime.datetime.fromtimestamp(t_now).strftime('%Y-%m-%d %H:%M:%S')\n\n callstack = []\n callstack_code = []\n f = inspect.currentframe()\n while True:\n f = f.f_back\n if f is None: break\n fname = os.path.abspath(f.f_code.co_filename)\n fline = f.f_lineno\n callstack.append('%s:%d' % (fname, fline))\n if os.path.exists(fname):\n with open(fname, 'r') as fl:\n fcode = fl.read()\n else:\n fcode = ''\n callstack_code.append(fcode)\n\n self.__header.update({'ctime': t_now_str,\n 'ctime_epoch': t_now,\n 'callstack': callstack,\n 'callstack_code': callstack_code})\n\n if file_type is None:\n file_type = self.__get_filetype(file_name)\n\n if file_type == \"Matlab\":\n raise RuntimeError('Saving BData as a mat file is no longer supported. 
Please save the data as HDF5 (.h5).')\n elif file_type == \"HDF5\":\n self.__save_h5(file_name, header=self.__header)\n\n else:\n raise ValueError(\"Unknown file type: %s\" % (file_type))\n\n\n # Private methods --------------------------------------------------\n\n def __metadata_key_to_bool_vector(self, key):\n '''Convert meta-dat key(s) to boolean vector.'''\n key_esc = re.escape(key).replace('\\*', '.*')\n keys = [k for k in self.metadata.key if re.match(key_esc, k)]\n if len(keys) == 0:\n raise RuntimeError('Meta-data %s not found' % key)\n vals = np.vstack([\n self.get_metadata(k)\n for k in keys])\n vals = (vals == 1)\n vec = np.sum(vals, axis=0).astype(bool)\n return vec\n\n def __get_order(self, v, sort_order='descend'):\n\n # 'np.nan' comes to the last of an acending series, and thus the top of a decending series.\n # To avoid that, convert 'np.nan' to -Inf.\n v[np.isnan(v)] = -np.inf\n\n sorted_index = np.argsort(v)[::-1] # Decending order\n order = np.arange(len(v))\n for i, x in enumerate(sorted_index):\n order[x] = i\n\n return np.array(order, dtype=float)\n\n\n def __get_top_elm_from_order(self, order, n):\n '''Get a boolean index of top `n` elements from `order`'''\n sorted_index = np.argsort(order)\n for i, x in enumerate(sorted_index):\n order[x] = i\n\n index = np.array([r < n for r in order], dtype=bool)\n\n return index\n\n\n def __save_h5(self, file_name, header=None):\n '''Save data in HDF5 format (*.h5)'''\n with h5py.File(file_name, 'w') as h5file:\n # dataset\n h5file.create_dataset('/dataset', data=self.dataset)\n\n # metadata\n md_keys = self.metadata.key\n md_desc = self.metadata.description\n md_vals = self.metadata.value\n\n h5file.create_group('/metadata')\n h5file.create_dataset('/metadata/key', data=[self.__to_bytes(x) for x in md_keys])\n h5file.create_dataset('/metadata/description', data=[self.__to_bytes(x) for x in md_desc])\n h5file.create_dataset('/metadata/value', data=md_vals)\n\n # header\n if header is not None:\n 
h5file.create_group('/header')\n for k, v in header.items():\n if isinstance(v, list):\n h5file.create_dataset('/header/' + k, data=[self.__to_bytes(x) for x in v])\n else:\n h5file.create_dataset('/header/' + k, data=self.__to_bytes(v)) # FIXME: save unicode str as is\n\n # vmap\n h5file.create_group('/vmap')\n for mk, vm in self.__vmap.items():\n h5file.create_group('/vmap/' + mk)\n for k, v in vm.items():\n h5file.create_dataset('/vmap/' + mk + '/' + str(k), data=self.__to_bytes(v)) # FIXME: save unicode str as is\n\n def __load_mat(self, load_filename):\n '''Load dataset and metadata from Matlab file'''\n\n dat = sio.loadmat(load_filename)\n\n if 'metaData' in dat:\n md_keys = [str(i[0]).strip() for i in np.asarray(dat[\"metaData\"]['key'][0, 0])[0].tolist()]\n md_descs = [str(i[0]).strip() for i in np.asarray(dat[\"metaData\"]['description'][0, 0])[0].tolist()]\n md_values = np.asarray(dat[\"metaData\"]['value'][0, 0])\n else:\n md_keys = [str(i[0]).strip() for i in np.asarray(dat[\"metadata\"]['key'][0, 0])[0].tolist()]\n md_descs = [str(i[0]).strip() for i in np.asarray(dat[\"metadata\"]['description'][0, 0])[0].tolist()]\n md_values = np.asarray(dat[\"metadata\"]['value'][0, 0])\n\n if 'dataSet' in dat:\n self.dataset = np.asarray(dat[\"dataSet\"])\n else:\n self.dataset = np.asarray(dat[\"dataset\"])\n\n if 'header' in dat:\n self.__header = dat['header']\n\n self.__metadata.key = md_keys\n self.__metadata.value = md_values\n self.__metadata.description = md_descs\n\n\n def __load_h5(self, load_filename):\n '''Load dataset and metadata from HDF5 file'''\n\n dat = h5py.File(load_filename, 'r')\n\n if 'metaData' in dat:\n md_keys = [self.__to_unicode(x) for x in dat[\"metaData\"]['key'][:].tolist()]\n md_descs = [self.__to_unicode(x) for x in dat[\"metaData\"]['description'][:].tolist()]\n md_values = np.asarray(dat[\"metaData\"]['value'], dtype=np.float)\n else:\n md_keys = [self.__to_unicode(x) for x in dat[\"metadata\"]['key'][:].tolist()]\n md_descs = 
[self.__to_unicode(x) for x in dat[\"metadata\"]['description'][:].tolist()]\n md_values = np.asarray(dat[\"metadata\"]['value'], dtype=np.float)\n\n if 'dataSet' in dat:\n self.dataset = np.asarray(dat[\"dataSet\"], dtype=np.float)\n else:\n self.dataset = np.asarray(dat[\"dataset\"], dtype=np.float)\n\n if 'header' in dat:\n for k, v in dat['header'].items():\n k = self.__to_unicode(k)\n if isinstance(v[()], np.ndarray):\n v = [self.__to_unicode(x) for x in v[()]]\n else:\n v = self.__to_unicode(v[()])\n self.__header.update({k: v})\n\n if 'vmap' in dat:\n for mk in dat['vmap'].keys():\n vmap = {}\n for k in dat['vmap'][mk].keys():\n vmap.update({float(k): self.__to_unicode(dat['vmap'][mk][k][()])})\n # TODO: fix this dirty solution\n if sys.version_info.major == 2:\n mk = mk.encode('utf-8')\n self.__vmap.update({mk: vmap})\n\n self.__metadata.key = md_keys\n self.__metadata.value = md_values\n self.__metadata.description = md_descs\n\n def __to_unicode(self, s):\n '''Convert s (bytes) to unicode str.'''\n if sys.version_info.major == 3:\n if isinstance(s, bytes):\n return s.decode('utf-8')\n return s\n\n def __to_bytes(self, s):\n '''Convert s (unicode str) to bytes.'''\n if sys.version_info.major == 3:\n if isinstance(s, str):\n return s.encode('utf-8')\n return s\n\n def __get_filetype(self, file_name):\n '''Return the type of `file_name` based on the file extension'''\n\n _, ext = os.path.splitext(file_name)\n\n if ext == \".mat\":\n file_type = \"Matlab\"\n elif ext == \".h5\":\n file_type = \"HDF5\"\n else:\n raise ValueError(\"Unknown file extension: %s\" % (ext))\n\n return file_type\n" ]
[ [ "numpy.hstack", "numpy.ix_", "numpy.logical_not", "numpy.logical_and", "numpy.isnan", "numpy.asarray", "scipy.io.loadmat", "numpy.ndarray", "numpy.logical_or", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
tlambert03/pycudadecon
[ "66eedccd11d738c1ec4afac8da542dfbcc3a26a0" ]
[ "pycudadecon/_ctyped.py" ]
[ "import ctypes\nimport functools\nimport os\nimport sys\nfrom ctypes.util import find_library\nfrom inspect import Parameter, signature\nfrom typing import Callable, Optional, Type\n\nimport numpy as np\n\nif sys.version_info >= (3, 7):\n from typing_extensions import Annotated, get_args, get_origin\nelse:\n # TODO: remove when py3.6 support is dropped\n from typing import Generic, GenericMeta\n\n from typing_extensions import Annotated, AnnotatedMeta\n\n def get_origin(tp):\n if isinstance(tp, AnnotatedMeta):\n return Annotated\n if isinstance(tp, GenericMeta):\n return tp.__origin__\n if tp is Generic:\n return Generic\n return None\n\n def get_args(tp):\n \"\"\"Get type arguments with all substitutions performed.\"\"\"\n if isinstance(tp, AnnotatedMeta):\n return (tp.__args__[0],) + tp.__metadata__\n if isinstance(tp, GenericMeta):\n import collections\n\n res = tp.__args__\n if tp.__origin__ is collections.abc.Callable and res[0] is not Ellipsis:\n res = (list(res[:-1]), res[-1])\n return res\n return ()\n\n\nclass Library:\n def __init__(self, name: str):\n self.name = name\n\n _file = name\n if not _file or not os.path.exists(_file):\n _file = find_library(name.replace(\"lib\", \"\", 1)) # type: ignore\n if not _file or not os.path.exists(_file):\n _file = find_library(name) # type: ignore\n\n self.lib = ctypes.CDLL(_file)\n if not self.lib._name:\n raise FileNotFoundError(f\"Unable to find library: {self.name}\")\n\n def function(self, func: Callable) -> Callable:\n func_c = getattr(self.lib, func.__name__)\n\n sig = signature(func)\n func_c.restype = cast_type(sig.return_annotation)\n func_c.argtypes = [cast_type(p.annotation) for p in sig.parameters.values()]\n\n class CTypesFunction:\n def __init__(self, func):\n self._func = func\n functools.update_wrapper(self, func)\n\n @property\n def __signature__(self):\n return sig\n\n def __call__(self, *args, **kw):\n return self._func(*args, **kw)\n\n def __repr__(_self):\n return (\n f\"<CTypesFunction: 
{os.path.basename(self.name)}.{func.__name__}>\"\n )\n\n return CTypesFunction(func_c)\n\n\ndef cast_type(hint: Type) -> Optional[Type]:\n\n if isinstance(hint, str):\n raise ValueError(\"forward ref typehints not supported\")\n\n if get_origin(hint) is Annotated:\n args = get_args(hint)\n if args and args[0] is np.ndarray:\n c_type = np.ctypeslib.as_ctypes_type(np.dtype(args[1]))\n return np.ctypeslib.ndpointer(c_type, flags=\"C_CONTIGUOUS\")\n\n return {\n None: None,\n Parameter.empty: None,\n bool: ctypes.c_bool,\n float: ctypes.c_float,\n int: ctypes.c_int,\n str: ctypes.c_char_p,\n np.ndarray: np.ctypeslib.ndpointer(ctypes.c_float, flags=\"C_CONTIGUOUS\"),\n }[hint]\n" ]
[ [ "numpy.ctypeslib.ndpointer", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jonahcullen/Camoco
[ "2e95950f996329e27c00e5155e3768c0de9b8b7b" ]
[ "camoco/Ontology.py" ]
[ "#!/usr/bin/python3\n\nfrom .Camoco import Camoco\nfrom .RefGen import RefGen\nfrom .Locus import Locus\nfrom .Term import Term\n\nfrom pandas import DataFrame\nfrom scipy.stats import hypergeom\nfrom itertools import chain\nfrom functools import lru_cache\nfrom collections import OrderedDict\n\nimport sys\nimport copy\nimport numpy\nimport camoco as co\nimport pandas as pd\n\nclass Ontology(Camoco):\n '''\n An Ontology is just a collection of terms. Each term is just a\n collection of genes. Sometimes terms are related or nested\n within each other, sometimes not. Simple enough.\n \n Parameters\n ----------\n name : unique identifier\n\n Returns\n -------\n An Ontology Object\n\n '''\n def __init__(self, name, type='Ontology'):\n super().__init__(name, type=type)\n if self.refgen:\n self.refgen = RefGen(self.refgen)\n\n def __len__(self):\n '''\n Return the number of non-empty terms\n '''\n return self.num_terms(min_term_size=1)\n\n def __iter__(self):\n return self.iter_terms()\n\n def num_terms(self,min_term_size=0,max_term_size=10e10):\n '''\n Returns the number of terms in the Ontology\n within the min_term_size and max_term_size\n\n Parameters\n ----------\n min_term_size (default:0)\n The minimum number of loci associated with the term \n max_term_size (default: 10e10)\n The maximum number of loci associated with the term\n\n Returns\n -------\n the number of terms that fit the criteria\n\n '''\n return self.db.cursor().execute(\n '''SELECT COUNT(*) FROM (\n SELECT DISTINCT(term) FROM term_loci \n GROUP BY term \n HAVING COUNT(term) >= ? 
\n AND COUNT(term) <= ?\n );''',\n (min_term_size, max_term_size)\n ).fetchone()[0]\n\n @lru_cache(maxsize=131072)\n def __getitem__(self, id):\n ''' retrieve a term by id '''\n try:\n (id, desc) = self.db.cursor().execute(\n 'SELECT * from terms WHERE id = ?', (id, )\n ).fetchone()\n term_loci = [\n self.refgen[gene_id] for gene_id, in self.db.cursor().execute(\n ''' SELECT id FROM term_loci WHERE term = ?''', (id, )\n ).fetchall()]\n term_attrs = {k:v for k,v in self.db.cursor().execute(\n ''' SELECT key,val FROM term_attrs WHERE term = ?''',(id,) \n )\n }\n return Term(id, desc=desc, loci=term_loci,**term_attrs)\n except TypeError as e: # Not in database\n raise e\n\n def terms_containing(self,locus_list,max_term_size=10e10,min_term_size=0):\n '''\n Retrurns the set of terms which contains the \n specified loci.\n\n Parameters\n ----------\n locus_list : iterable of type Locus\n The list of loci for which to retrieve \n corresponding terms.\n max_term_size : int (default: 10e10)\n The maximum term size for which to test enrichment. Useful\n for filtering out large terms that would otherwise be \n uninformative (e.g. top level GO terms)\n min_term_size : int (default: 0)\n The minimum term size for which to test enrichment. Useful\n for filtering out very small terms that would be uninformative\n (e.g. 
single gene terms)\n\n Returns\n -------\n list of terms which contain provided loci\n '''\n # Filter to unique set\n locus_list = set(locus_list)\n # query the database\n terms = self.db.cursor().execute('''SELECT DISTINCT term \n FROM term_loci WHERE id IN ('{}')\n '''.format(\n \"','\".join([x.id for x in locus_list])\n )).fetchall()\n # Fetch the terms with the proper size\n terms = list(\n filter(\n lambda t: (len(t) >= min_term_size) and (len(t) <= max_term_size),\n [self[name] for name, in terms]\n )\n )\n return terms\n\n\n def num_distinct_loci(self):\n return self.db.cursor().execute(\n 'SELECT COUNT(DISTINCT(id)) FROM term_loci;'\n ).fetchone()[0]\n\n def distinct_loci_ids(self):\n return [x[0] for x in self.db.cursor().execute(\n 'SELECT DISTINCT(id) FROM term_loci'\n )]\n\n def iter_terms(self,min_term_size=0,max_term_size=10e10):\n '''\n Return a generator that iterates over each term in the ontology.\n '''\n terms = self.db.cursor().execute('''\n SELECT term from term_loci\n GROUP BY term\n HAVING COUNT(term) >= ?\n AND COUNT(term) <= ?\n ''',(min_term_size,max_term_size))\n for id, in terms:\n yield self[id]\n\n def terms(self,min_term_size=0,max_term_size=10e10):\n return list(self.iter_terms(min_term_size=min_term_size,max_term_size=max_term_size))\n\n def summary(self):\n return \"Ontology:{} - desc: {} - contains {} terms for {}\".format(\n self.name, self.description, len(self), self.refgen)\n\n def rand(self, n=1, min_term_size=1, max_term_size=100000):\n '''\n Return a random Term from the Ontology\n\n Parameters\n ----------\n n : int (default: 1)\n The number of random terms to return\n min_term_size : int (default: 1)\n The smallest acceptable term size\n i.e. 
the number of genes annotated to the term\n max_term_size : int (default: 100000)\n The largest acceptable term size\n '''\n cur = self.db.cursor()\n ids = cur.execute(''' \n SELECT term FROM term_loci \n GROUP BY term \n HAVING COUNT(term) >= ?\n AND COUNT(term) <= ?\n ORDER BY RANDOM() \n LIMIT ?;\n ''',(min_term_size,max_term_size,n)).fetchall()\n if len(ids) == 0:\n raise ValueError(\n 'No Terms exists with this criteria '\n '{} < len(term) < {}:'.format(min_term_size,max_term_size)\n )\n terms = [self[id[0]] for id in ids]\n if len(terms) == 1:\n return terms[0]\n else:\n return terms\n\n def add_term(self, term, cursor=None, overwrite=False):\n ''' \n This will add a single term to the ontology\n\n Parameters\n ----------\n term : Term object\n The term object you wish to add.\n cursor : apsw cursor object\n A initialized cursor object, for batch operation. This will\n allow for adding many terms in one transaction as long as the \n passed in cursor has executed the \"BEGIN TRANSACTION\" command.\n overwrite : bool\n Indication to delete any existing entry before writing\n '''\n\n if overwrite:\n self.del_term(term.id)\n if not cursor:\n cur = self.db.cursor()\n cur.execute('BEGIN TRANSACTION')\n else:\n cur = cursor\n\n # Add the term id and description\n cur.execute('''\n INSERT OR ABORT INTO terms (id, desc)\n VALUES (?, ?)''', (term.id, term.desc))\n\n # Add the term loci\n if term.loci:\n for locus in term.loci:\n cur.execute('''\n INSERT OR ABORT INTO term_loci (term, id)\n VALUES (?, ?)\n ''', (term.id, locus.id))\n\n # Add the term attrs\n if term.attrs:\n for key,val in term.attrs.items():\n cur.execute('''\n INSERT OR ABORT INTO term_attrs (term,key,val)\n VALUES (?,?)\n ''',(term.id,key,val))\n\n if not cursor:\n cur.execute('END TRANSACTION')\n\n def del_term(self, term, cursor=None):\n ''' This will delete a single term to the ontology\n\n Parameters\n ----------\n term : Term object or str\n The term object or id you wish to remove.\n cursor : 
apsw cursor object\n A initialized cursor object, for batch operation.'''\n\n try:\n if not cursor:\n cur = self.db.cursor()\n cur.execute('BEGIN TRANSACTION')\n else:\n cur = cursor\n \n if not isinstance(term, str):\n id = term.id\n else:\n id = term\n \n cur.execute('''\n DELETE FROM term_loci WHERE term = ?;\n DELETE FROM terms WHERE id = ?;\n ''', (id, id))\n if not cursor:\n cur.execute('END TRANSACTION')\n except Exception as e:\n cur.execute('ROLLBACK')\n raise e\n\n\n def add_terms(self, terms, overwrite=True):\n '''\n A Convenience function to add terms from an iterable.\n\n Parameters\n ----------\n terms : iterable of camoco.Term objects\n '''\n if overwrite:\n self.del_terms(terms)\n cur = self.db.cursor()\n cur.execute('BEGIN TRANSACTION')\n for term in terms:\n self.add_term(term, cursor=cur, overwrite=False)\n cur.execute('END TRANSACTION')\n\n def del_terms(self, terms):\n '''\n A Convenience function to delete many term object\n\n Parameters\n ----------\n terms : iterable of camoco.Term objects.\n '''\n cur = self.db.cursor()\n cur.execute('BEGIN TRANSACTION')\n for term in terms:\n self.del_term(term, cursor=cur)\n cur.execute('END TRANSACTION')\n\n def set_strongest(self,attr=None,higher=None):\n '''\n Convinience function that allows you to set default values for\n strongest SNP2Gene mapping tasks.\n\n Parameters\n ----------\n attr: The locus attr used to determine which locus is the \n strongest locus.\n \n higher: Flag indicating whether the value in --strongest-attr\n is stronger if it is higher. Default behavior is to\n treatlower values as stronger (i.e. 
p-vals)\n '''\n if not(attr is None):\n self._global('strongest_attr',attr)\n if not(higher is None):\n self._global('strongest_higher',higher)\n\n def get_strongest_attr(self):\n '''\n Convinience function that allows you to get the default value for\n the locus attr used to determine which locus is the strongest locus\n strongest SNP2Gene mapping.\n '''\n return self._global('strongest_attr')\n \n def get_strongest_higher(self):\n '''\n Convinience function that allows you to get default values for\n the flag indicating whether the value in `strongest-attr` is\n is stronger if higher for strongest SNP2Gene mapping tasks.\n '''\n return self._global('strongest_higher')\n\n\n @classmethod\n def create(cls, name, description, refgen, type='Ontology'):\n '''\n This method creates a fresh Ontology with nothing it it.\n '''\n # run the inherited create method from Camoco\n self = super().create(name, description, type=type)\n # set the refgen for the current instance\n self.refgen = refgen\n # add the global refgen\n self._global('refgen', refgen.name)\n # build the tables\n self._create_tables()\n return self\n \n @classmethod\n def from_DataFrame(cls, dataframe, name, description, refgen,\n gene_col='gene',term_col='Term'):\n '''\n Convenience function to create a Ontology from an iterable\n terms object. 
\n\n Parameters\n ----------\n dataframe : pandas.DataFrame\n A pandas dataframe containing the mapping betweeen gene ids\n and \n name : str\n The name of the camoco object to be stored in the database.\n description : str\n A short message describing the dataset.\n refgen : camoco.RefGen\n A RefGen object describing the genes in the dataset\n\n Optional Parameters\n -------------------\n gene_col : str (default: gene)\n The string designating the column in the dataframe containing\n gene names (ids)\n term_col : str (default: Term)\n The string designating the column in the dataframe containing\n the term name.\n\n '''\n self = cls.create(name,description,refgen)\n # create terms from \n terms = [\n Term(id,loci=refgen[set(df[gene_col])]) \\\n for id,df in dataframe.groupby(term_col)\n ]\n self.log('Adding {} terms to the database.',len(terms))\n self.add_terms(terms, overwrite=True)\n # Build the indices\n self.log('Building the indices.')\n self._build_indices()\n self.log('Your gene ontology is built.')\n return self\n\n @classmethod\n def from_terms(cls, terms, name, description, refgen):\n '''\n Convenience function to create a Ontology from an iterable\n terms object. \n\n Parameters\n ----------\n terms : iterable of camoco.GOTerm objects\n Items to add to the ontology. 
The key being the name\n of the term and the items being the loci.\n name : str\n The name of the camoco object to be stored in the database.\n description : str\n A short message describing the dataset.\n refgen : camoco.RefGen\n A RefGen object describing the genes in the dataset\n '''\n self = cls.create(name,description,refgen)\n self.log('Adding {} terms to the database.',len(terms))\n self.add_terms(terms, overwrite=True)\n # Build the indices\n self.log('Building the indices.')\n self._build_indices()\n\n self.log('Your gene ontology is built.')\n return self\n\n def _create_tables(self):\n cur = self.db.cursor()\n cur.execute('''\n CREATE TABLE IF NOT EXISTS terms (\n id TEXT UNIQUE,\n desc TEXT\n )'''\n )\n cur.execute('''\n CREATE TABLE IF NOT EXISTS term_loci (\n term TEXT, \n id TEXT\n );'''\n )\n cur.execute('''\n CREATE TABLE IF NOT EXISTS term_attrs (\n term TEXT,\n key TEXT,\n val TEXT\n );\n ''')\n\n def _clear_tables(self):\n cur = self.db.cursor()\n cur.execute('DELETE FROM terms; DELETE FROM term_loci;')\n\n def _build_indices(self):\n cursor = self.db.cursor()\n cursor.execute('CREATE INDEX IF NOT EXISTS termIND ON terms (id)')\n cursor.execute('CREATE INDEX IF NOT EXISTS lociIND ON term_loci (term,id)')\n\n def _drop_indices(self):\n cursor = self.db.cursor()\n cursor.execute('DROP INDEX IF EXISTS termIND; DROP INDEX IF EXISTS lociIND;')\n\n def enrichment(self, locus_list, pval_cutoff=0.05, max_term_size=300,\n min_term_size=2, num_universe=None, return_table=False,\n label=None,include_genes=False,bonferroni_correction=True,\n min_overlap=1):\n '''\n Evaluates enrichment of loci within the locus list for terms within\n the ontology. NOTE: this only tests terms that have at least one\n locus that exists in locus_list.\n\n Parameters\n ----------\n locus_list : list of co.Locus *or* instance of co.Ontology\n A list of loci for which to test enrichment. i.e. is there\n an over-representation of these loci within and the terms in\n the Ontology. 
If an ontology is passed, each term in the ontology\n will be iterated over and tested as if they were a locus_list.\n pval_cutoff : float (default: 0.05)\n Report terms with a pval lower than this value\n bonferroni_correction : bool (default: True)\n correct for testing multiple terms using Bonferroni correction\n max_term_size : int (default: 300)\n The maximum term size for which to test enrichment. Useful\n for filtering out large terms that would otherwise be \n uninformative (e.g. top level GO terms)\n min_term_size : int (default: 5)\n The minimum term size for which to test enrichment. Useful\n for filtering out very small terms that would be uninformative\n (e.g. single gene terms)\n num_universe : int (default: None)\n Use a custom universe size for the hypergeometric calculation, \n for instance if you have a reduced number of genes in a reference\n co-expression network. If None, the value will be calculated as\n the total number of distinct genes that are observed in the \n ontology.\n include_genes : bool (default: False)\n Include comma delimited genes as a field\n return_table : bool (default: False)\n If True, return results as a data frame\n label: str (default: None)\n If a label is specified, it will be inlcuded in the results\n min_overlap : int (default: 1)\n The minimum overlap between genes in the term and genes in\n the locus list. 
Increasing this value can minimize spurious\n or uninformative terms\n '''\n if isinstance(locus_list,co.Ontology):\n ontology = locus_list\n self.log('Calculating enrichment for an Ontology: {}',ontology.name)\n\n enrich = []\n if label is None:\n label = ontology.name\n if num_universe is None:\n num_universe = len(set(self.distinct_loci_ids()).union(ontology.distinct_loci_ids()))\n for term in ontology.terms(min_term_size=min_term_size,max_term_size=max_term_size):\n term = copy.copy(term)\n e = self.enrichment(\n term.loci,\n pval_cutoff=pval_cutoff,\n max_term_size=max_term_size,\n min_term_size=min_term_size,\n num_universe=num_universe,\n return_table=return_table,\n label=label+'_'+term.id,\n include_genes=include_genes,\n bonferroni_correction=bonferroni_correction,\n min_overlap=min_overlap,\n ) \n enrich.append(e)\n if return_table:\n return pd.concat(enrich)\n else:\n return enrich\n # return a new copy of each \n\n terms = [copy.copy(term) for term in self.terms_containing(\n locus_list,\n min_term_size=min_term_size,\n max_term_size=max_term_size\n )]\n # Calculate the size of the Universe\n if num_universe is None:\n num_universe = self.num_distinct_loci() \n self.log(\n '{}: Loci occur in {} terms, containing {} genes'.format(\n label,len(terms), num_universe\n )\n )\n significant_terms = []\n for term in terms:\n term_genes = set(term.loci)\n #if len(term_genes) > max_term_size:\n # continue\n num_common = len(term_genes.intersection(locus_list))\n num_in_term = len(term_genes)\n num_sampled = len(locus_list)\n # the reason this is num_common - 1 is because we are looking for 1 - cdf\n # and we need to greater than OR EQUAL TO num_common\n # Look. 
Do this in ipython:\n '''\n In [99]: probs = [hypergeom.pmf(x,100,5,10) for x in range(0,6)]\n In [100]: probs\n Out[100]: \n [0.58375236692612187,\n 0.33939091100357333,\n 0.070218809173150043,\n 0.006383528106649855,\n 0.00025103762217164457,\n 3.3471682956218215e-06]\n In [103]: 1-sum(probs[0:3]) \n # Get the probs of drawing 3 or more\n Out[103]: 0.006637912897154763\n # Remember slicing is exclusive for the end value\n In [105]: hypergeom.sf(3,100,5,10)\n # That aint right\n Out[105]: 0.00025438479046726637\n In [106]: hypergeom.sf(3-1,100,5,10)\n # See? You want num_common - 1\n Out[106]: 0.0066379128971171221\n '''\n pval = hypergeom.sf(num_common-1,num_universe,num_in_term,num_sampled)\n if pval <= pval_cutoff and num_common >= min_overlap:\n term.attrs['hyper'] = OrderedDict([\n ('source' , self.name),\n ('pval' , pval),\n ('terms_tested' , len(terms)),\n ('num_common' , num_common),\n ('num_universe' , num_universe),\n ('source_term_size' , num_in_term),\n ('target_term_size' , len(locus_list)),\n ('num_terms' , len(self)),\n #('num_sampled' , num_sampled)\n ])\n if label != None:\n term.attrs['hyper']['label'] = label\n if bonferroni_correction == True:\n # Right now this isn't true bonferroni, its only correcting for\n # the number of terms that had term genes in it\n if pval > pval_cutoff / len(terms):\n term.attrs['hyper']['bonferroni'] = False\n else:\n term.attrs['hyper']['bonferroni'] = True\n term.attrs['pval'] = pval\n if include_genes == True:\n term.attrs['hyper']['genes'] = \",\".join(\n [x.id for x in term_genes.intersection(locus_list)]\n )\n significant_terms.append(term)\n self.log('\\t\\tFound {} was significant for {} terms',label,len(significant_terms))\n if return_table == True:\n tbl = []\n for x in significant_terms:\n val = OrderedDict([\n ('name', x.name),\n ('id' , x.id)\n ])\n val.update(x.attrs['hyper'])\n val.update(x.attrs)\n del val['hyper']\n tbl.append(val)\n tbl = DataFrame.from_records(tbl)\n #if label != None:\n # 
tbl['label'] = label\n if len(tbl) > 0:\n tbl = tbl.sort_values(by='pval')\n return tbl\n else:\n return sorted(significant_terms,key=lambda x: x.attrs['pval'])\n\n\n\n" ]
[ [ "pandas.DataFrame.from_records", "pandas.concat", "scipy.stats.hypergeom.sf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
davidmccandlish/vcregression
[ "25f5c0148798fa6936088eb6eee6e3e52d67f449" ]
[ "vcregression/vc_pos_var.py" ]
[ "import argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"a\", help=\"alphabet size\", type=int)\nparser.add_argument(\"l\", help=\"sequence length\", type=int)\nparser.add_argument(\"-name\", help=\"name of output folder\")\nparser.add_argument(\"-data\", help=\"path to input data\",\n type=str, required=True)\nparser.add_argument(\"-lambdas\", help=\"path to lambdas\",\n type=str, required=True)\n\nparser.add_argument(\"-seqsvar\", help=\"list of sequences for calculating posterior variances\",\n dest=\"seqsvar\", type=str, required=True)\n\n\nimport numpy as np\nimport scipy as sp\nimport itertools\nimport sys\nimport time\nimport scipy as sp\nimport itertools\nimport os\nimport math\nimport csv\nimport pandas as pd\nimport random as rd\nimport statistics\nfrom scipy.sparse import csr_matrix, dia_matrix\nfrom scipy.optimize import minimize\nfrom scipy.special import comb\nfrom scipy.spatial.distance import hamming\nfrom scipy.sparse.linalg import LinearOperator\nfrom scipy.sparse.linalg import cg\n\n\nimport vc_regression as vc\n\n\n############################\nargs = parser.parse_args()\n\nif args.name == None:\n args.name = \"my_project\"\n\nname = args.name\noutdir = name\n\n\na = args.a\nl = args.l\n\n\n# QC\nif a**l > 5000000:\n print(\"sequence space is to big!\")\n exit()\n\nvc.preliminary_preparation(a, l)\n\ndata = pd.read_csv(args.data, header=None)\n\n#########\nbabel = ''\nfor i in range(len(data)):\n babel += data[0][i]\n\nalphabet = set(babel)\n\nAA2N = dict([(sorted(alphabet)[i], i) for i in range(len(alphabet))])\nN2AA = {v: k for k, v in AA2N.items()}\n\n\ndef seqAA2num(seq):\n return [AA2N[seq[i]] for i in range(len(seq))]\n\n\ndef seqnum2AA(seq):\n seqAA = [N2AA[seq[i]] for i in range(len(seq))]\n return ''.join(seqAA)\n\nseqs = [seqAA2num(data[0][i]) for i in range(len(data))]\ntr = np.array([vc.seq2pos(seqs[i]) for i in range(len(seqs))])\n###########\n\n\nseqs = [seqAA2num(data[0][i]) for i in range(len(data))]\ntr = 
np.array([vc.seq2pos(seqs[i]) for i in range(len(seqs))])\n\nif np.shape(seqs)[1] != l:\n print(\"seqs file dimension incompatible!\")\n exit()\n\n\nys = np.array(data[1])\nsig2s = np.array(data[2])\n\nlda_star = pd.DataFrame(np.array(pd.read_csv(\n args.lambdas, header=None, index_col=0)))\nlda_star = np.array(lda_star).flatten()\nvc.lda_star = lda_star\n\nprint(\"using lambdas = \", str(lda_star))\n\nvc.set_data_as_global_parameters(seqs, ys, sig2s)\nvc.construct_A_sparse()\nvc.construct_E_sparse()\n\n####################\nseqsvar = np.array(pd.read_csv(args.seqsvar, header=None))\nseqsvar = seqsvar.flatten()\nseqsvarN = [seqAA2num(seq) for seq in seqsvar]\n\n\nvarpos = vc.compute_posterior_variance(seqsvarN)\n\n\npd.DataFrame({'seq': seqsvar, 'variance': varpos}).to_csv(\n outdir + '/varpos.txt', index=False, header=['sequence', 'variance'])\n\n\nprint(\"Done!\")\n" ]
[ [ "numpy.array", "pandas.read_csv", "numpy.shape", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
daniel-schaefer/CompEcon-python
[ "d3f66e04a7e02be648fc5a68065806ec7cc6ffd6" ]
[ "compecon/demos/demqua04.py" ]
[ "\n# coding: utf-8\n\n# ### DEMQUA04\n# # Area under normal pdf using Simpson's rule\n\n# In[1]:\n\n\nimport numpy as np\nfrom compecon import qnwsimp, demo\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\nn, a, z = 11, 0, 1\n\ndef f(x):\n return np.sqrt(1/(2*np.pi))*np.exp(-0.5*x**2)\n\n\n# In[3]:\n\n\nx, w = qnwsimp(n, a, z)\nprob = 0.5 + w.dot(f(x))\n\n\n# In[4]:\n\n\na, b, n = -4, 4, 500\nx = np.linspace(a, b, n)\nxz = np.linspace(a, z, n)\n\nplt.figure(figsize=[8,4])\nplt.fill_between(xz,f(xz), color='yellow')\nplt.hlines(0, a, b,'k','solid')\nplt.vlines(z, 0, f(z),'r',linewidth=2)\nplt.plot(x,f(x), linewidth=3)\ndemo.annotate(-1, 0.08,r'$\\Pr\\left(\\tilde Z\\leq z\\right)$',fs=18,ms=0)\nplt.yticks([])\nplt.xticks([z],['$z$'],size=20)\n\ndemo.savefig([plt.gcf()])" ]
[ [ "numpy.sqrt", "numpy.linspace", "matplotlib.pyplot.hlines", "matplotlib.pyplot.gcf", "matplotlib.pyplot.yticks", "numpy.exp", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
verlab/DEAL_NeurIPS_2021
[ "02480e12035c20227f9d9bba05c07048927ea774" ]
[ "evaluation/distmat_tools.py" ]
[ "# Copyright 2021 [name of copyright owner]\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport numpy as np\nimport cv2 \n\ndef distMat(a,b):\n m = a.shape[0]\n n = b.shape[0]\n a_norm = np.linalg.norm(a, axis=1)\n b_norm = np.linalg.norm(b, axis=1)\n A = np.repeat(a_norm[:, np.newaxis], n, axis = 1)\n B = np.repeat(b_norm[np.newaxis, :], m, axis = 0)\n x = a @ (b / b_norm[:, np.newaxis]).T\n D = np.sqrt((B-x)**2 + A**2 - x**2)\n return D\n\ndef save(desc_ref, desc_tgt, filename):\n\tdesc_ref = np.array(desc_ref)\n\tdesc_tgt = np.array(desc_tgt)\n\n\tdist_mat = distMat(desc_ref, desc_tgt)\n\n\twith open(filename, 'w') as f:\n\t\tf.write('%d %d\\n'%(dist_mat.shape[0], dist_mat.shape[1]))\n\t\tfor i in range(dist_mat.shape[0]):\n\t\t\tfor j in range(dist_mat.shape[1]):\n\t\t\t\tf.write('%.5f '%(dist_mat[i,j]))\n\t\t\t\n\t\tf.write('\\n')\n\ndef save_cvnorm(desc_ref, desc_tgt, filename):\n\tdesc_ref = np.array(desc_ref)\n\tdesc_tgt = np.array(desc_tgt)\n\n\tdist_mat = distMat(desc_ref, desc_tgt)\n\n\tfor i in range(dist_mat.shape[0]):\n\t\tfor j in range(dist_mat.shape[1]):\n\t\t\tdist_mat[i,j] = cv2.norm(desc_ref[i] - desc_tgt[j])\n\n\twith open(filename, 'w') as f:\n\t\tf.write('%d %d\\n'%(dist_mat.shape[0], dist_mat.shape[1]))\n\t\tfor i in range(dist_mat.shape[0]):\n\t\t\tfor j in range(dist_mat.shape[1]):\n\t\t\t\tf.write('%.5f '%(dist_mat[i,j]))\n\t\t\t\n\t\tf.write('\\n')\t\n\ndef load_cv_kps(csv):\n\tkeypoints = []\n\tfor line in csv:\n\t\tk = 
cv2.KeyPoint(line['x'], line['y'], line['size']*1., line['angle'])\n\t\tkeypoints.append(k)\n\n\treturn keypoints" ]
[ [ "numpy.repeat", "numpy.array", "numpy.linalg.norm", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rollno4/tensorflow
[ "169124c0c9630b719e7f0e55722c38c7ecd6c5ac" ]
[ "tensorflow/python/distribute/tpu_strategy.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TPU Strategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport contextlib\nimport copy\nimport weakref\n\nimport numpy as np\n\nfrom tensorflow.python.autograph.core import ag_ctx\nfrom tensorflow.python.autograph.impl import api as autograph\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import input_lib\nfrom tensorflow.python.distribute import numpy_dataset\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.distribute.cluster_resolver import TPUClusterResolver\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import device_spec\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom 
tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.tpu import device_assignment as device_assignment_lib # pylint: disable=unused-import\nfrom tensorflow.python.tpu import tpu\nfrom tensorflow.python.tpu import tpu_strategy_util\nfrom tensorflow.python.tpu import tpu_system_metadata as tpu_system_metadata_lib\nfrom tensorflow.python.tpu import training_loop\nfrom tensorflow.python.tpu.ops import tpu_ops\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef get_tpu_system_metadata(tpu_cluster_resolver):\n \"\"\"Retrieves TPU system metadata given a TPUClusterResolver.\"\"\"\n master = tpu_cluster_resolver.master()\n\n # pylint: disable=protected-access\n cluster_spec = tpu_cluster_resolver.cluster_spec()\n cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None\n tpu_system_metadata = (\n tpu_system_metadata_lib._query_tpu_system_metadata(\n master,\n cluster_def=cluster_def,\n query_topology=False))\n\n return tpu_system_metadata\n\n\[email protected]\ndef maybe_init_scope():\n if ops.executing_eagerly_outside_functions():\n yield\n else:\n with ops.init_scope():\n yield\n\n\ndef validate_experimental_run_function(fn):\n \"\"\"Validate the function passed into strategy.experimental_run_v2.\"\"\"\n\n # We allow three types of functions/objects passed into TPUStrategy\n # experimental_run_v2 in eager mode:\n # 1. a user annotated tf.function\n # 2. a ConcreteFunction, this is mostly what you get from loading a saved\n # model.\n # 3. 
a callable object and the `__call__` method itself is a tf.function.\n #\n # Otherwise we return an error, because we don't support eagerly running\n # experimental_run_v2 in TPUStrategy.\n\n if context.executing_eagerly() and not isinstance(\n fn, def_function.Function) and not isinstance(\n fn, function.ConcreteFunction) and not (callable(fn) and isinstance(\n fn.__call__, def_function.Function)):\n raise NotImplementedError(\n \"TPUStrategy.experimental_run_v2(fn, ...) does not support eager \"\n \"execution. Either convert `fn` into a tf.function or consider \"\n \"calling strategy.experimental_run_v2 inside a tf.function.\")\n\n\n@tf_export(\"distribute.experimental.TPUStrategy\", v1=[])\nclass TPUStrategy(distribute_lib.Strategy):\n \"\"\"TPU distribution strategy implementation.\"\"\"\n\n def __init__(self,\n tpu_cluster_resolver=None,\n device_assignment=None):\n \"\"\"Synchronous training in TPU donuts or Pods.\n\n To construct a TPUStrategy object, you need to run the\n initialization code as below:\n\n ```python\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.experimental.TPUStrategy(resolver)\n ```\n\n While using distribution strategies, the variables created within strategy's\n scope will be replicated across all the replicas and can be kept in sync\n using all-reduce algorithms.\n\n To run TF2 programs on TPUs, you can either use `.compile` and\n `.fit` APIs in `tf.keras` with TPUStrategy, or write your own customized\n training loop by calling `strategy.experimental_run_v2` directly. 
Note that\n TPUStrategy doesn't support pure eager execution, so please make sure the\n function passed into `strategy.experimental_run_v2` is a `tf.function` or\n `strategy.experimental_run_v2` us called inside a `tf.function` if running\n in eager mode.\n\n Args:\n tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,\n which provides information about the TPU cluster.\n device_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to\n specify the placement of replicas on the TPU cluster. Currently only\n supports the usecase of using a single core within a TPU cluster.\n \"\"\"\n super(TPUStrategy, self).__init__(TPUExtended(\n self, tpu_cluster_resolver, device_assignment=device_assignment))\n distribute_lib.distribution_strategy_gauge.get_cell(\"V2\").set(\"TPUStrategy\")\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_workers\").set(self.extended.num_hosts)\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_replicas_per_worker\").set(self.extended.num_replicas_per_host)\n\n # TODO(cjfj): Modify `_call_for_each_replica` in `TPUExtended` such that this\n # can use the default implementation.\n # This implementation runs a single step. 
It does not use infeed or outfeed.\n def experimental_run_v2(self, fn, args=(), kwargs=None):\n \"\"\"See base class.\"\"\"\n validate_experimental_run_function(fn)\n\n # Note: the target function is converted to graph even when in Eager mode,\n # so autograph is on by default here.\n fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx())\n return self.extended.tpu_run(fn, args, kwargs)\n\n\n@tf_export(v1=[\"distribute.experimental.TPUStrategy\"])\nclass TPUStrategyV1(distribute_lib.StrategyV1):\n \"\"\"TPU distribution strategy implementation.\"\"\"\n\n def __init__(self,\n tpu_cluster_resolver=None,\n steps_per_run=None,\n device_assignment=None):\n \"\"\"Initializes the TPUStrategy object.\n\n Args:\n tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,\n which provides information about the TPU cluster.\n steps_per_run: Number of steps to run on device before returning to the\n host. Note that this can have side-effects on performance, hooks,\n metrics, summaries etc.\n This parameter is only used when Distribution Strategy is used with\n estimator or keras.\n device_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to\n specify the placement of replicas on the TPU cluster. 
Currently only\n supports the usecase of using a single core within a TPU cluster.\n \"\"\"\n super(TPUStrategyV1, self).__init__(TPUExtended(\n self, tpu_cluster_resolver, steps_per_run, device_assignment))\n distribute_lib.distribution_strategy_gauge.get_cell(\"V1\").set(\"TPUStrategy\")\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_workers\").set(self.extended.num_hosts)\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_replicas_per_worker\").set(self.extended.num_replicas_per_host)\n\n @property\n def steps_per_run(self):\n \"\"\"DEPRECATED: use .extended.steps_per_run instead.\"\"\"\n return self._extended.steps_per_run\n\n # TODO(cjfj): Modify `_call_for_each_replica` in `TPUExtended` such that this\n # can use the default implementation.\n # This implementation runs a single step. It does not use infeed or outfeed.\n def experimental_run_v2(self, fn, args=(), kwargs=None):\n \"\"\"See base class.\"\"\"\n validate_experimental_run_function(fn)\n\n fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx())\n return self.extended.tpu_run(fn, args, kwargs)\n\n\n# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.\nclass TPUExtended(distribute_lib.StrategyExtendedV1):\n \"\"\"Implementation of TPUStrategy.\"\"\"\n\n def __init__(self,\n container_strategy,\n tpu_cluster_resolver=None,\n steps_per_run=None,\n device_assignment=None):\n super(TPUExtended, self).__init__(container_strategy)\n\n if tpu_cluster_resolver is None:\n tpu_cluster_resolver = TPUClusterResolver(\"\")\n\n if steps_per_run is None:\n # TODO(frankchn): Warn when we are being used by DS/Keras and this is\n # not specified.\n steps_per_run = 1\n\n self._tpu_function_cache = weakref.WeakKeyDictionary()\n self._tpu_cluster_resolver = tpu_cluster_resolver\n self._tpu_metadata = get_tpu_system_metadata(self._tpu_cluster_resolver)\n self._device_assignment = device_assignment\n\n self._tpu_devices = [d.name for d in 
self._tpu_metadata.devices\n if \"device:TPU:\" in d.name]\n\n # Only create variables for the number of replicas we're running.\n if device_assignment is not None:\n job_name = device_spec.DeviceSpecV2.from_string(self._tpu_devices[0]).job\n\n self._tpu_devices = []\n for replica_id in range(device_assignment.num_replicas):\n tpu_device = device_assignment.tpu_device(\n replica=replica_id, logical_core=0, job=job_name)\n tpu_device = device_util.canonicalize(tpu_device)\n self._tpu_devices.append(tpu_device)\n\n self._host_device = device_util.get_host_for_device(self._tpu_devices[0])\n\n self._device_map = values.ReplicaDeviceMap(self._tpu_devices)\n\n # Preload the data onto the TPUs.\n input_worker_devices = collections.OrderedDict()\n for tpu_device in self._tpu_devices:\n host_device = device_util.get_host_for_device(tpu_device)\n input_worker_devices.setdefault(host_device, [])\n input_worker_devices[host_device].append(tpu_device)\n self._input_worker_devices = tuple(input_worker_devices.items())\n self._input_workers_obj = None\n\n # TODO(sourabhbajaj): Remove this once performance of running one step\n # at a time is comparable to multiple steps.\n self.steps_per_run = steps_per_run\n self._require_static_shapes = True\n\n # TPUStrategy handles the graph replication in TF-XLA bridge, so we don't\n # need to retrace functions for each device.\n self._retrace_functions_for_each_device = False\n\n self.experimental_enable_get_next_as_optional = True\n self.experimental_enable_dynamic_batch_size = True\n self._prefetch_on_host = False\n\n # TODO(bfontain): Remove once a proper dataset API exists for prefetching\n # a dataset to multiple devices exists.\n # If value is true, this forces prefetch of data to the host's memeory rather\n # than the individual TPU device's memory. 
This is needed when using for TPU\n # Embeddings as a) sparse tensors cannot be prefetched to the TPU device\n # memory and b) TPU Embedding enqueue operation are CPU ops and this avoids\n # a copy back to the host for dense tensors\n def _set_prefetch_on_host(self, value):\n if self._prefetch_on_host == value:\n return\n if self._input_workers_obj is not None:\n raise RuntimeError(\"Unable to change prefetch on host behavior as \"\n \"InputWorkers are already created.\")\n self._prefetch_on_host = value\n if value:\n # To prefetch on the host, we must set all the input worker devices to the\n # corresponding host devices.\n self._input_worker_devices = tuple([\n tuple([host,\n [device_util.get_host_for_device(d) for d in devices]])\n for host, devices in self._input_worker_devices])\n # Force creation of the workers.\n workers = self._input_workers\n del workers\n\n @property\n def _input_workers(self):\n if self._input_workers_obj is None:\n self._input_workers_obj = input_lib.InputWorkers(\n self._device_map, self._input_worker_devices)\n return self._input_workers_obj\n\n def _validate_colocate_with_variable(self, colocate_with_variable):\n values.validate_colocate(colocate_with_variable, self)\n\n def _make_dataset_iterator(self, dataset):\n \"\"\"Make iterators for each of the TPU hosts.\"\"\"\n return input_lib.DatasetIterator(\n dataset,\n self._input_workers,\n self._container_strategy(),\n split_batch_by=self._num_replicas_in_sync)\n\n def _make_input_fn_iterator(\n self,\n input_fn,\n replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n input_contexts = []\n num_workers = self._input_workers.num_workers\n for i in range(num_workers):\n input_contexts.append(distribute_lib.InputContext(\n num_input_pipelines=num_workers,\n input_pipeline_id=i,\n num_replicas_in_sync=self._num_replicas_in_sync))\n return input_lib.InputFunctionIterator(\n input_fn,\n self._input_workers,\n input_contexts,\n self._container_strategy())\n\n def 
_experimental_make_numpy_dataset(self, numpy_input, session):\n return numpy_dataset.one_host_numpy_dataset(\n numpy_input, numpy_dataset.SingleDevice(self._host_device),\n session)\n\n def _experimental_distribute_dataset(self, dataset):\n return input_lib.get_distributed_dataset(\n dataset,\n self._input_workers,\n self._container_strategy(),\n split_batch_by=self._num_replicas_in_sync)\n\n def _experimental_distribute_datasets_from_function(self, dataset_fn):\n input_contexts = []\n num_workers = self._input_workers.num_workers\n for i in range(num_workers):\n input_contexts.append(distribute_lib.InputContext(\n num_input_pipelines=num_workers,\n input_pipeline_id=i,\n num_replicas_in_sync=self._num_replicas_in_sync))\n\n return input_lib.get_distributed_datasets_from_function(\n dataset_fn,\n self._input_workers,\n input_contexts,\n self._container_strategy())\n\n # TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.\n # TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have\n # a mechanism to infer the outputs of `fn`. Pending b/110550782.\n def _experimental_run_steps_on_iterator(\n self, fn, multi_worker_iterator, iterations, initial_loop_values=None):\n # Wrap `fn` for repeat.\n if initial_loop_values is None:\n initial_loop_values = {}\n initial_loop_values = nest.flatten(initial_loop_values)\n ctx = input_lib.MultiStepContext()\n\n def run_fn(inputs):\n \"\"\"Single step on the TPU device.\"\"\"\n fn_result = fn(ctx, inputs)\n flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)\n if flat_last_step_outputs:\n with ops.control_dependencies([fn_result]):\n return [array_ops.identity(f) for f in flat_last_step_outputs]\n else:\n return fn_result\n\n # We capture the control_flow_context at this point, before we run `fn`\n # inside a while_loop and TPU replicate context. This is useful in cases\n # where we might need to exit these contexts and get back to the outer\n # context to do some things, for e.g. 
create an op which should be\n # evaluated only once at the end of the loop on the host. One such usage\n # is in creating metrics' value op.\n self._outer_control_flow_context = (\n ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access\n\n def rewrite_fn(*args):\n \"\"\"The rewritten step fn running on TPU.\"\"\"\n del args\n\n per_replica_inputs = multi_worker_iterator.get_next()\n replicate_inputs = []\n for replica_id in range(self._num_replicas_in_sync):\n select_replica = lambda x: values.select_replica(replica_id, x) # pylint: disable=cell-var-from-loop\n replicate_inputs.append((nest.map_structure(\n select_replica, per_replica_inputs),))\n\n replicate_outputs = tpu.replicate(\n run_fn, replicate_inputs, device_assignment=self._device_assignment)\n\n # If run_fn has tensor outputs, tpu.replicate returns a list of list. We\n # will flatten it in this case. If run_fn has no tensor outputs,\n # tpu.replicate returns a list of no_ops, we will keep the output as it\n # is.\n if isinstance(replicate_outputs[0], list):\n replicate_outputs = nest.flatten(replicate_outputs)\n\n return replicate_outputs\n\n # TODO(sourabhbajaj): The input to while loop should be based on the\n # output type of the step_fn\n assert isinstance(initial_loop_values, list)\n initial_loop_values = initial_loop_values * self._num_replicas_in_sync\n\n # Put the while loop op on TPU host 0.\n with ops.device(self._host_device):\n if self.steps_per_run == 1:\n replicate_outputs = rewrite_fn()\n else:\n replicate_outputs = training_loop.repeat(iterations, rewrite_fn,\n initial_loop_values)\n\n del self._outer_control_flow_context\n ctx.run_op = control_flow_ops.group(replicate_outputs)\n\n if isinstance(replicate_outputs, list):\n # Filter out any ops from the outputs, typically this would be the case\n # when there were no tensor outputs.\n last_step_tensor_outputs = [\n x for x in replicate_outputs if not isinstance(x, ops.Operation)\n ]\n\n # Outputs are 
currently of the structure (flattened)\n # [output0_device0, output1_device0, output2_device0,\n # output0_device1, output1_device1, output2_device1,\n # ...]\n # Convert this to the following structure instead: (grouped by output)\n # [[output0_device0, output0_device1],\n # [output1_device0, output1_device1],\n # [output2_device0, output2_device1]]\n output_num = len(last_step_tensor_outputs) // self._num_replicas_in_sync\n last_step_tensor_outputs = [\n last_step_tensor_outputs[i::output_num] for i in range(output_num)\n ]\n else:\n # no tensors returned.\n last_step_tensor_outputs = []\n\n _set_last_step_outputs(ctx, last_step_tensor_outputs)\n return ctx\n\n def _call_for_each_replica(self, fn, args, kwargs):\n # TODO(jhseu): Consider making it so call_for_each_replica implies that\n # we're in a tpu.rewrite(), and update TPUMirroredVariable accordingly.\n with _TPUReplicaContext(self._container_strategy()):\n return fn(*args, **kwargs)\n\n def _experimental_initialize_system(self):\n \"\"\"Experimental method added to be used by Estimator.\n\n This is a private method only to be used by Estimator. Other frameworks\n should directly be calling `tf.tpu.experimental.initialize_tpu_system`\n \"\"\"\n tpu_strategy_util.initialize_tpu_system(self._tpu_cluster_resolver)\n\n def _create_variable(self, next_creator, *args, **kwargs):\n \"\"\"Create a TPUMirroredVariable. 
See `DistributionStrategy.scope`.\"\"\"\n if kwargs.pop(\"skip_mirrored_creator\", False):\n return next_creator(*args, **kwargs)\n\n colocate_with = kwargs.pop(\"colocate_with\", None)\n if colocate_with is None:\n device_map = self._device_map\n logical_device = 0 # TODO(josh11b): Get logical device from scope here.\n elif isinstance(colocate_with, numpy_dataset.SingleDevice):\n with ops.device(colocate_with.device):\n return next_creator(*args, **kwargs)\n else:\n device_map = colocate_with.device_map\n logical_device = colocate_with.logical_device\n\n def _real_mirrored_creator(devices, *args, **kwargs): # pylint: disable=g-missing-docstring\n initial_value = None\n value_list = []\n for i, d in enumerate(devices):\n with ops.device(d):\n if i == 0:\n initial_value = kwargs[\"initial_value\"]\n # Note: some v1 code expects variable initializer creation to happen\n # inside a init_scope.\n with maybe_init_scope():\n initial_value = initial_value() if callable(\n initial_value) else initial_value\n\n if i > 0:\n # Give replicas meaningful distinct names:\n var0name = value_list[0].name.split(\":\")[0]\n # We append a / to variable names created on replicas with id > 0 to\n # ensure that we ignore the name scope and instead use the given\n # name as the absolute name of the variable.\n kwargs[\"name\"] = \"%s/replica_%d/\" % (var0name, i)\n kwargs[\"initial_value\"] = initial_value\n\n with context.device_policy(context.DEVICE_PLACEMENT_SILENT):\n v = next_creator(*args, **kwargs)\n\n assert not isinstance(v, values.TPUMirroredVariable)\n value_list.append(v)\n return value_list\n\n return values.create_mirrored_variable(\n self._container_strategy(), device_map, logical_device,\n _real_mirrored_creator, values.TPUMirroredVariable,\n values.TPUSyncOnReadVariable, *args, **kwargs)\n\n def _reduce_to(self, reduce_op, value, destinations):\n if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access\n if reduce_op == 
reduce_util.ReduceOp.MEAN:\n # TODO(jhseu): Revisit once we support model-parallelism.\n value *= (1. / self._num_replicas_in_sync)\n elif reduce_op != reduce_util.ReduceOp.SUM:\n raise NotImplementedError(\n \"Currently only support sum & mean in TPUStrategy.\")\n return tpu_ops.cross_replica_sum(value)\n\n if not isinstance(value, values.DistributedValues):\n # This function handles reducing values that are not PerReplica or\n # Mirrored values. For example, the same value could be present on all\n # replicas in which case `value` would be a single value or value could\n # be 0.\n return cross_device_ops_lib.reduce_non_distributed_value(\n reduce_op, self._device_map, value, destinations)\n\n # TODO(cjfj): Detect when it is possible to use `cross_replica_sum`.\n # Always performs the reduction on the TPU host.\n with ops.device(self._host_device):\n output = math_ops.add_n(value.values)\n if reduce_op == reduce_util.ReduceOp.MEAN:\n output *= (1. / len(value.values))\n\n devices = cross_device_ops_lib.get_devices_from(destinations)\n\n if len(devices) == 1:\n # If necessary, copy to requested destination.\n dest_canonical = device_util.canonicalize(devices[0])\n host_canonical = device_util.canonicalize(self._host_device)\n\n if dest_canonical != host_canonical:\n with ops.device(dest_canonical):\n output = array_ops.identity(output)\n else:\n output = cross_device_ops_lib.simple_broadcast(output, destinations)\n\n return output\n\n def _update(self, var, fn, args, kwargs, group):\n assert isinstance(var, values.TPUVariableMixin) or isinstance(\n var, resource_variable_ops.BaseResourceVariable)\n if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access\n if group:\n return fn(var, *args, **kwargs)\n else:\n return (fn(var, *args, **kwargs),)\n\n # Otherwise, we revert to MirroredStrategy behavior and update each variable\n # directly.\n updates = []\n for i, (d, v) in enumerate(zip(var.devices, var.values)):\n name = \"update_%d\" % i\n 
with ops.device(d), distribute_lib.UpdateContext(i), ops.name_scope(name):\n # If args and kwargs are not mirrored, the value is returned as is.\n updates.append(fn(v,\n *values.select_device_mirrored(d, args),\n **values.select_device_mirrored(d, kwargs)))\n return values.update_regroup(self, self._device_map, updates, group)\n\n def read_var(self, var):\n assert isinstance(var, values.TPUVariableMixin) or isinstance(\n var, resource_variable_ops.BaseResourceVariable)\n return var.read_value()\n\n def _local_results(self, val):\n if isinstance(val, values.DistributedValues):\n return val.values\n return (val,)\n\n def value_container(self, value):\n return value\n\n def _broadcast_to(self, tensor, destinations):\n del destinations\n # This is both a fast path for Python constants, and a way to delay\n # converting Python values to a tensor until we know what type it\n # should be converted to. Otherwise we have trouble with:\n # global_step.assign_add(1)\n # since the `1` gets broadcast as an int32 but global_step is int64.\n if isinstance(tensor, (float, int)):\n return tensor\n if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access\n broadcast_tensor = [tensor for _ in range(self._num_replicas_in_sync)]\n result = tpu_ops.all_to_all(\n broadcast_tensor,\n concat_dimension=0,\n split_dimension=0,\n split_count=self._num_replicas_in_sync)\n\n # This uses the broadcasted value from the first replica because the only\n # caller of this is for ONLY_FIRST_REPLICA variables aggregation.\n return result[0]\n return tensor\n\n @property\n def num_hosts(self):\n if self._device_assignment is None:\n return self._tpu_metadata.num_hosts\n\n return len(set([self._device_assignment.host_device(r)\n for r in range(self._device_assignment.num_replicas)]))\n\n @property\n def num_replicas_per_host(self):\n if self._device_assignment is None:\n return self._tpu_metadata.num_of_cores_per_host\n\n # TODO(sourabhbajaj): Remove this method we use inputs 
and remove infeed\n # as the computation of num_replicas_per_host is not a constant\n # when using device_assignment. This is a temporary workaround to support\n # StatefulRNN as everything is 1 in that case.\n # This method needs to take host_id as input for correct computation.\n max_models_per_host = (self._tpu_metadata.num_of_cores_per_host //\n self._device_assignment.num_cores_per_replica)\n return min(self._device_assignment.num_replicas, max_models_per_host)\n\n @property\n def _num_replicas_in_sync(self):\n if self._device_assignment is None:\n return self._tpu_metadata.num_cores\n return self._device_assignment.num_replicas\n\n @property\n def experimental_between_graph(self):\n return False\n\n @property\n def experimental_should_init(self):\n return True\n\n @property\n def should_checkpoint(self):\n return True\n\n @property\n def should_save_summary(self):\n return True\n\n @property\n def worker_devices(self):\n return self._tpu_devices\n\n @property\n def parameter_devices(self):\n return self._tpu_devices\n\n def non_slot_devices(self, var_list):\n return self._host_device\n\n def _update_non_slot(self, colocate_with, fn, args, kwargs, group):\n del colocate_with\n with ops.device(self._host_device), distribute_lib.UpdateContext(None):\n result = fn(*args, **kwargs)\n if group:\n return result\n else:\n return nest.map_structure(self._local_results, result)\n\n def _configure(self,\n session_config=None,\n cluster_spec=None,\n task_type=None,\n task_id=None):\n del cluster_spec, task_type, task_id\n if session_config:\n session_config.CopyFrom(self._update_config_proto(session_config))\n\n def _update_config_proto(self, config_proto):\n updated_config = copy.deepcopy(config_proto)\n updated_config.isolate_session_state = True\n cluster_spec = self._tpu_cluster_resolver.cluster_spec()\n if cluster_spec:\n updated_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())\n return updated_config\n\n # TODO(priyag): Delete this once all strategies use 
global batch size.\n @property\n def _global_batch_size(self):\n \"\"\"`make_dataset_iterator` and `make_numpy_iterator` use global batch size.\n\n `make_input_fn_iterator` assumes per-replica batching.\n\n Returns:\n Boolean.\n \"\"\"\n return True\n\n def tpu_run(self, fn, args, kwargs):\n func = self._tpu_function_creator(fn)\n return func(args, kwargs)\n\n def _tpu_function_creator(self, fn):\n if fn in self._tpu_function_cache:\n return self._tpu_function_cache[fn]\n\n strategy = self._container_strategy()\n\n def tpu_function(args, kwargs):\n \"\"\"TF Function used to replicate the user computation.\"\"\"\n if kwargs is None:\n kwargs = {}\n\n # Remove None at the end of args as they are not replicatable\n # If there are None in the middle we can't do anything about it\n # so let those cases fail.\n # For example when Keras model predict is used they pass the targets as\n # None. We want to handle it here so all client libraries don't have to\n # do this as other strategies can handle None values better.\n while args and args[-1] is None:\n args = args[:-1]\n\n # Used to re-structure flattened output tensors from `tpu.replicate()`\n # into a structured format.\n result = [[]]\n\n def replicated_fn(replica_id, replica_args, replica_kwargs):\n \"\"\"Wraps user function to provide replica ID and `Tensor` inputs.\"\"\"\n with _TPUReplicaContext(strategy, replica_id_in_sync_group=replica_id):\n result[0] = fn(*replica_args, **replica_kwargs)\n return result[0]\n\n replicate_inputs = [] # By replica.\n for i in range(strategy.num_replicas_in_sync):\n replicate_inputs.append(\n [constant_op.constant(i, dtype=dtypes.int32),\n values.select_replica(i, args),\n values.select_replica(i, kwargs)])\n\n # Construct and pass `maximum_shapes` so that we could support dynamic\n # shapes using dynamic padder.\n if self.experimental_enable_dynamic_batch_size and replicate_inputs:\n maximum_shapes = []\n flattened_list = nest.flatten(replicate_inputs[0])\n for input_tensor in 
flattened_list:\n if tensor_util.is_tensor(input_tensor):\n rank = input_tensor.get_shape().rank\n else:\n rank = np.rank(input_tensor)\n maximum_shape = tensor_shape.TensorShape([None] * rank)\n maximum_shapes.append(maximum_shape)\n maximum_shapes = nest.pack_sequence_as(replicate_inputs[0],\n maximum_shapes)\n else:\n maximum_shapes = None\n\n with strategy.scope():\n replicate_outputs = tpu.replicate(\n replicated_fn,\n replicate_inputs,\n device_assignment=self._device_assignment,\n maximum_shapes=maximum_shapes)\n\n # Remove all no ops that may have been added during 'tpu.replicate()'\n if isinstance(result[0], list):\n result[0] = [\n output for output in result[0] if tensor_util.is_tensor(output)\n ]\n\n # Workaround for `tpu.replicate` behaviour when single `Tensor` returned.\n if result[0] is None or isinstance(result[0], ops.Operation):\n replicate_outputs = [None] * len(replicate_outputs)\n else:\n replicate_outputs = [\n nest.pack_sequence_as(result[0], nest.flatten(replica_output))\n for replica_output in replicate_outputs\n ]\n device_map = self._device_map # pylint: disable=protected-access\n return values.regroup(device_map, replicate_outputs)\n\n if context.executing_eagerly():\n tpu_function = def_function.function(tpu_function)\n\n self._tpu_function_cache[fn] = tpu_function\n return tpu_function\n\n def _in_multi_worker_mode(self):\n \"\"\"Whether this strategy indicates working in multi-worker settings.\"\"\"\n # TPUStrategy has different distributed training structure that the whole\n # cluster should be treated as single worker from higher-level (e.g. 
Keras)\n # library's point of view.\n # TODO(rchao): Revisit this as we design a fault-tolerance solution for\n # TPUStrategy.\n return False\n\n\nclass _TPUReplicaContext(distribute_lib.ReplicaContext):\n \"\"\"Replication Context class for TPU Strategy.\"\"\"\n\n # TODO(sourabhbajaj): Call for each replica should be updating this.\n # TODO(b/118385803): Always properly initialize replica_id.\n def __init__(self, strategy, replica_id_in_sync_group=None):\n if replica_id_in_sync_group is None:\n replica_id_in_sync_group = constant_op.constant(0, dtypes.int32)\n distribute_lib.ReplicaContext.__init__(\n self, strategy, replica_id_in_sync_group=replica_id_in_sync_group)\n\n @property\n def devices(self):\n distribute_lib.require_replica_context(self)\n ds = self._strategy\n replica_id = tensor_util.constant_value(self._replica_id_in_sync_group)\n\n if replica_id is None: # Non-constant `Tensor` inside `tpu.replicate`.\n # TODO(cjfj): Return other devices when model parallelism is supported.\n return (tpu.core(0),)\n else:\n return (ds.extended.worker_devices[replica_id],)\n\n\ndef _set_last_step_outputs(ctx, last_step_tensor_outputs):\n \"\"\"Sets the last step outputs on the given context.\"\"\"\n # Convert replicate_outputs to the original dict structure of\n # last_step_outputs.\n last_step_tensor_outputs_dict = nest.pack_sequence_as(\n ctx.last_step_outputs, last_step_tensor_outputs)\n\n for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-access\n output = last_step_tensor_outputs_dict[name]\n # For outputs that have already been reduced, take the first value\n # from the list as each value should be the same. 
Else return the full\n # list of values.\n # TODO(josh11b): If reduce_op is NONE, we should return a PerReplica\n # value.\n if reduce_op is not None:\n # TODO(priyag): Should this return the element or a list with 1 element\n last_step_tensor_outputs_dict[name] = output[0]\n ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access\n" ]
[ [ "tensorflow.python.tpu.ops.tpu_ops.all_to_all", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.tpu.tpu.core", "tensorflow.python.distribute.values.ReplicaDeviceMap", "tensorflow.python.distribute.cross_device_ops.reduce_non_distributed_value", "tensorflow.python.distribute.distribute_lib.UpdateContext", "tensorflow.python.distribute.numpy_dataset.SingleDevice", "tensorflow.python.framework.ops.executing_eagerly_outside_functions", "tensorflow.python.distribute.distribute_lib.distribution_strategy_gauge.get_cell", "tensorflow.python.distribute.values.validate_colocate", "tensorflow.python.framework.ops.device", "tensorflow.python.distribute.cross_device_ops.simple_broadcast", "tensorflow.python.tpu.tpu_system_metadata._query_tpu_system_metadata", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.distribute.cluster_resolver.TPUClusterResolver", "tensorflow.python.distribute.values.update_regroup", "tensorflow.python.tpu.training_loop.repeat", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.tpu.ops.tpu_ops.cross_replica_sum", "tensorflow.python.distribute.values.select_replica", "tensorflow.python.distribute.device_util.get_host_for_device", "tensorflow.python.tpu.tpu_strategy_util.initialize_tpu_system", "tensorflow.python.distribute.cross_device_ops.get_devices_from", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.distribute.values._enclosing_tpu_context", "tensorflow.python.distribute.distribute_lib.InputContext", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.distribute.input_lib.MultiStepContext", "tensorflow.python.util.nest.map_structure", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.tpu.tpu.replicate", "tensorflow.python.distribute.distribute_lib.distribution_strategy_replica_gauge.get_cell", "tensorflow.python.eager.def_function.function", "tensorflow.python.framework.ops.init_scope", 
"tensorflow.python.distribute.distribute_lib.ReplicaContext.__init__", "tensorflow.python.framework.tensor_util.is_tensor", "tensorflow.python.eager.context.device_policy", "tensorflow.python.distribute.device_util.canonicalize", "tensorflow.python.framework.device_spec.DeviceSpecV2.from_string", "tensorflow.python.distribute.values.regroup", "tensorflow.python.util.nest.pack_sequence_as", "tensorflow.python.distribute.distribute_lib.require_replica_context", "tensorflow.python.distribute.values.select_device_mirrored", "numpy.rank", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.ops.math_ops.add_n", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.autograph.core.ag_ctx.control_status_ctx", "tensorflow.python.distribute.input_lib.InputWorkers", "tensorflow.python.util.nest.flatten", "tensorflow.python.framework.constant_op.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "2.4", "2.3", "2.9", "2.5", "2.2", "2.10" ] } ]
santhalakshminarayana/AutoML
[ "275bd4fab6c0cfa82c2458520b3e685591b174e7" ]
[ "utils/data_preprocess_clustering.py" ]
[ "import pandas as pd\nimport csv\n\nfrom utils.update_logs import update_pass, update_fail\n\ndef data_preprocess_clustering(file_name):\n\t'''\n\tParams:\n\t------\n\t\tfile_name (str) : path to dataset file\n\n\tReturns:\n\t--------\n\t\tstatus (str) : fail or pass of data pre-processing\n\t\tlogs (list) : running logs of data pre-processing\n\t\tdata_dict (dict) : dictionary of data arrays \n\n\tTODO: \n\t-----\t\n\t\tAsk user to select predict column.\n\t\tPre-processing for date-time columns.\n\t'''\n\tfile_type = file_name.split('.')[-1]\n\n\tlogs = []\n\tstatus = 'pass'\n\n\tlogs.append('Processing dataset.')\n\t# check whether dataset contains header or not\n\thas_header = False\n\ttry:\n\t\thas_header = csv.Sniffer().has_header(open(file_name).read(2048))\n\n\texcept:\n\t\tlogs.append('Be sure dataset file is not empty or with proper delimeters accordingly.')\n\t\tstatus = 'fail'\n\t\treturn status, logs, None\n\n\t# read dataset file accordingly with and without header \n\tdf = None\n\tif file_type == 'csv':\n\t\ttry:\n\t\t\tif has_header == False:\n\t\t\t\tdf = pd.read_csv(file_name, sep = \",\", header = None)\n\t\t\telse:\n\t\t\t\tdf = pd.read_csv(file_name, sep = \",\")\n\t\texcept:\n\t\t\tlogs.append('Error while checking dataset file. May due to delimeter, inconsistent format ...')\n\t\t\tstatus = 'fail'\n\t\t\treturn status, logs, None\n\n\telif file_type == 'txt':\n\t\ttry:\n\t\t\tif has_header == False:\n\t\t\t\tdf = pd.read_csv(file_name, sep = \" \", header = None)\n\t\t\telse:\n\t\t\t\tdf = pd.read_csv(file_name, sep = \" \")\n\t\texcept:\n\t\t\tlogs.append('Error while checking dataset file. 
May due to delimeter, inconsistent format ...')\n\t\t\tstatus = 'fail'\n\t\t\treturn status, logs, None\n\n\tif has_header == False:\n\t\tlogs.append('No header found or header type mismatch.')\n\t\tlogs.append('Assigning headers implicitly.')\n\t\tdf.columns = ['co_' + str(i+1) for i in range(len(df.iloc[0].values))]\n\t\tlogs.append(f'columns = {df.columns.tolist()}')\n\n\t# check for dtype of each column\n\tfor col_dtype in df.dtypes:\n\t\tif col_dtype == 'O':\n\t\t\tlogs.append('Make sure that each column of dataset is either int or float for clustering / anomaly detection.')\n\t\t\tlogs.append('It is better if data_type is int.')\n\t\t\tstatus = 'fail'\n\t\t\treturn status, logs, None\n\n\t# check for null values and fill\n\tcols = df.columns\n\tcols_dtypes = df.dtypes\n\tis_null = df.isnull().any()\n\tnull_cols = []\n\tfor col in cols:\n\t\tif is_null[col] == True:\n\t\t\tnull_cols.append(col)\n\t\t\tdf[col].fillna(df[col].mean(), inplace = True)\n\n\tif len(null_cols) > 0:\n\t\tlogs.append(f'Dataset has NULL values present at columns - {null_cols}.')\n\t\tlogs.append('For these columns NULL values are replaced with MEAN of respective column.')\n\n\t# remove duplicate rows\n\tlogs.append('Removing duplicate rows if present.')\n\tdf.drop_duplicates(inplace = True)\n\n\t# get values of dataframe\n\tdata_dict = dict()\n\tdata_dict['X'] = df.values\n\tstatus = 'pass'\n\n\treturn status, logs, data_dict\n\ndef clustering_dataset(dataset_files):\n\t# pre-processing data files\n\ttrain_status, train_logs, train_data_dict = data_preprocess_clustering(dataset_files['train_file'])\n\tif train_status == 'pass':\n\t\tupdate_pass('Train', train_logs)\n\t\treturn train_data_dict\n\telse:\n\t\tupdate_fail('Train', train_logs)\n\t\treturn None" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]