corpus_id: stringlengths (7 to 12)
paper_id: stringlengths (9 to 16)
title: stringlengths (1 to 261)
abstract: stringlengths (70 to 4.02k)
source: stringclasses (1 value)
bibtex: stringlengths (208 to 20.9k)
citation_key: stringlengths (6 to 100)
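Each record below follows this schema, one value per field in the order listed above. As a rough sketch of how a corpus with these columns could be consumed, the snippet below iterates a few rows with the Hugging Face datasets library; the repository name "user/arxiv-corpus" is a placeholder assumption, not the actual dataset identifier.

# Minimal sketch: iterate rows of a corpus with the columns listed above.
# "user/arxiv-corpus" is a hypothetical placeholder, not the real repository name.
from datasets import load_dataset

ds = load_dataset("user/arxiv-corpus", split="train")

for row in ds.select(range(3)):  # inspect the first few records
    # Abstracts are wrapped in <|reference_start|> ... <|reference_end|> markers.
    abstract = row["abstract"]
    abstract = abstract.removeprefix("<|reference_start|>").removesuffix("<|reference_end|>")
    print(row["corpus_id"], row["paper_id"], row["title"])
    print(row["citation_key"], "->", abstract[:80], "...")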
arxiv-662101
2409.17453
AgMTR: Agent Mining Transformer for Few-shot Segmentation in Remote Sensing
<|reference_start|>AgMTR: Agent Mining Transformer for Few-shot Segmentation in Remote Sensing: Few-shot Segmentation (FSS) aims to segment the interested objects in the query image with just a handful of labeled samples (i.e., support images). Previous schemes would leverage the similarity between support-query pixel pairs to construct the pixel-level semantic correlation. However, in remote sensing scenarios with extreme intra-class variations and cluttered backgrounds, such pixel-level correlations may produce tremendous mismatches, resulting in semantic ambiguity between the query foreground (FG) and background (BG) pixels. To tackle this problem, we propose a novel Agent Mining Transformer (AgMTR), which adaptively mines a set of local-aware agents to construct agent-level semantic correlation. Compared with pixel-level semantics, the given agents are equipped with local-contextual information and possess a broader receptive field. At this point, different query pixels can selectively aggregate the fine-grained local semantics of different agents, thereby enhancing the semantic clarity between query FG and BG pixels. Concretely, the Agent Learning Encoder (ALE) is first proposed to erect the optimal transport plan that arranges different agents to aggregate support semantics under different local regions. Then, for further optimizing the agents, the Agent Aggregation Decoder (AAD) and the Semantic Alignment Decoder (SAD) are constructed to break through the limited support set for mining valuable class-specific semantics from unlabeled data sources and the query image itself, respectively. Extensive experiments on the remote sensing benchmark iSAID indicate that the proposed method achieves state-of-the-art performance. Surprisingly, our method remains quite competitive when extended to more common natural scenarios, i.e., PASCAL-5i and COCO-20i.<|reference_end|>
arxiv
@article{bi2024agmtr:, title={AgMTR: Agent Mining Transformer for Few-shot Segmentation in Remote Sensing}, author={Hanbo Bi, Yingchao Feng, Yongqiang Mao, Jianning Pei, Wenhui Diao, Hongqi Wang, Xian Sun}, journal={arXiv preprint arXiv:2409.17453}, year={2024}, archivePrefix={arXiv}, eprint={2409.17453}, primaryClass={cs.CV} }
bi2024agmtr:
arxiv-662102
2409.17455
Navigating the Shortcut Maze: A Comprehensive Analysis of Shortcut Learning in Text Classification by Language Models
<|reference_start|>Navigating the Shortcut Maze: A Comprehensive Analysis of Shortcut Learning in Text Classification by Language Models: Language models (LMs), despite their advances, often depend on spurious correlations, undermining their accuracy and generalizability. This study addresses the overlooked impact of subtler, more complex shortcuts that compromise model reliability beyond oversimplified shortcuts. We introduce a comprehensive benchmark that categorizes shortcuts into occurrence, style, and concept, aiming to explore the nuanced ways in which these shortcuts influence the performance of LMs. Through extensive experiments across traditional LMs, large language models, and state-of-the-art robust models, our research systematically investigates models' resilience and susceptibilities to sophisticated shortcuts. Our benchmark and code can be found at: https://github.com/yuqing-zhou/shortcut-learning-in-text-classification.<|reference_end|>
arxiv
@article{zhou2024navigating, title={Navigating the Shortcut Maze: A Comprehensive Analysis of Shortcut Learning in Text Classification by Language Models}, author={Yuqing Zhou, Ruixiang Tang, Ziyu Yao, Ziwei Zhu}, journal={Findings of EMNLP 2024}, year={2024}, archivePrefix={arXiv}, eprint={2409.17455}, primaryClass={cs.CL cs.LG} }
zhou2024navigating
arxiv-662103
2409.17456
Long or Short or Both? An Exploration on Lookback Time Windows of Behavioral Features in Product Search Ranking
<|reference_start|>Long or Short or Both? An Exploration on Lookback Time Windows of Behavioral Features in Product Search Ranking: Customer shopping behavioral features are core to product search ranking models in eCommerce. In this paper, we investigate the effect of lookback time windows when aggregating these features at the (query, product) level over history. By studying the pros and cons of using long and short time windows, we propose a novel approach to integrating these historical behavioral features of different time windows. In particular, we address the criticality of using query-level vertical signals in ranking models to effectively aggregate all information from different behavioral features. Anecdotal evidence for the proposed approach is also provided using live product search traffic on Walmart.com.<|reference_end|>
arxiv
@article{liu2024long, title={Long or Short or Both? An Exploration on Lookback Time Windows of Behavioral Features in Product Search Ranking}, author={Qi Liu, Atul Singh, Jingbo Liu, Cun Mu, Zheng Yan, Jan Pedersen}, journal={arXiv preprint arXiv:2409.17456}, year={2024}, archivePrefix={arXiv}, eprint={2409.17456}, primaryClass={cs.IR} }
liu2024long
arxiv-662104
2409.17457
CadVLM: Bridging Language and Vision in the Generation of Parametric CAD Sketches
<|reference_start|>CadVLM: Bridging Language and Vision in the Generation of Parametric CAD Sketches: Parametric Computer-Aided Design (CAD) is central to contemporary mechanical design. However, it encounters challenges in achieving precise parametric sketch modeling and lacks practical evaluation metrics suitable for mechanical design. We harness the capabilities of pre-trained foundation models, renowned for their successes in natural language processing and computer vision, to develop generative models specifically for CAD. These models are adept at understanding complex geometries and design reasoning, a crucial advancement in CAD technology. In this paper, we propose CadVLM, an end-to-end vision language model for CAD generation. Our approach involves adapting pre-trained foundation models to manipulate engineering sketches effectively, integrating both sketch primitive sequences and sketch images. Extensive experiments demonstrate superior performance on multiple CAD sketch generation tasks such as CAD autocompletion, CAD autoconstraint, and image conditional generation. To our knowledge, this is the first instance of a multimodal Large Language Model (LLM) being successfully applied to parametric CAD generation, representing a pioneering step in the field of computer-aided mechanical design.<|reference_end|>
arxiv
@article{wu2024cadvlm:, title={CadVLM: Bridging Language and Vision in the Generation of Parametric CAD Sketches}, author={Sifan Wu, Amir Khasahmadi, Mor Katz, Pradeep Kumar Jayaraman, Yewen Pu, Karl Willis, Bang Liu}, journal={arXiv preprint arXiv:2409.17457}, year={2024}, archivePrefix={arXiv}, eprint={2409.17457}, primaryClass={cs.CV cs.AI} }
wu2024cadvlm:
arxiv-662105
2409.17458
RED QUEEN: Safeguarding Large Language Models against Concealed Multi-Turn Jailbreaking
<|reference_start|>RED QUEEN: Safeguarding Large Language Models against Concealed Multi-Turn Jailbreaking: The rapid progress of Large Language Models (LLMs) has opened up new opportunities across various domains and applications; yet it also presents challenges related to potential misuse. To mitigate such risks, red teaming has been employed as a proactive security measure to probe language models for harmful outputs via jailbreak attacks. However, current jailbreak attack approaches are single-turn with explicit malicious queries that do not fully capture the complexity of real-world interactions. In reality, users can engage in multi-turn interactions with LLM-based chat assistants, allowing them to conceal their true intentions in a more covert manner. To bridge this gap, we, first, propose a new jailbreak approach, RED QUEEN ATTACK. This method constructs a multi-turn scenario, concealing the malicious intent under the guise of preventing harm. We craft 40 scenarios that vary in turns and select 14 harmful categories to generate 56k multi-turn attack data points. We conduct comprehensive experiments on the RED QUEEN ATTACK with four representative LLM families of different sizes. Our experiments reveal that all LLMs are vulnerable to RED QUEEN ATTACK, reaching 87.62% attack success rate on GPT-4o and 75.4% on Llama3-70B. Further analysis reveals that larger models are more susceptible to the RED QUEEN ATTACK, with multi-turn structures and concealment strategies contributing to its success. To prioritize safety, we introduce a straightforward mitigation strategy called RED QUEEN GUARD, which aligns LLMs to effectively counter adversarial attacks. This approach reduces the attack success rate to below 1% while maintaining the model's performance across standard benchmarks. Full implementation and dataset are publicly accessible at https://github.com/kriti-hippo/red_queen.<|reference_end|>
arxiv
@article{jiang2024red, title={RED QUEEN: Safeguarding Large Language Models against Concealed Multi-Turn Jailbreaking}, author={Yifan Jiang, Kriti Aggarwal, Tanmay Laud, Kashif Munir, Jay Pujara, Subhabrata Mukherjee}, journal={arXiv preprint arXiv:2409.17458}, year={2024}, archivePrefix={arXiv}, eprint={2409.17458}, primaryClass={cs.CR cs.CL cs.LG} }
jiang2024red
arxiv-662106
2409.17459
TFS-NeRF: Template-Free NeRF for Semantic 3D Reconstruction of Dynamic Scene
<|reference_start|>TFS-NeRF: Template-Free NeRF for Semantic 3D Reconstruction of Dynamic Scene: Despite advancements in Neural Implicit models for 3D surface reconstruction, handling dynamic environments with arbitrary rigid, non-rigid, or deformable entities remains challenging. Many template-based methods are entity-specific, focusing on humans, while generic reconstruction methods adaptable to such dynamic scenes often require additional inputs like depth or optical flow or rely on pre-trained image features for reasonable outcomes. These methods typically use latent codes to capture frame-by-frame deformations. In contrast, some template-free methods bypass these requirements and adopt traditional LBS (Linear Blend Skinning) weights for a detailed representation of deformable object motions, although they involve complex optimizations leading to lengthy training times. As a remedy, this paper introduces TFS-NeRF, a template-free 3D semantic NeRF for dynamic scenes captured from sparse or single-view RGB videos, which handles interactions among various entities and is more time-efficient than other LBS-based approaches. Our framework uses an Invertible Neural Network (INN) for LBS prediction, simplifying the training process. By disentangling the motions of multiple entities and optimizing per-entity skinning weights, our method efficiently generates accurate, semantically separable geometries. Extensive experiments demonstrate that our approach produces high-quality reconstructions of both deformable and non-deformable objects in complex interactions, with improved training efficiency compared to existing methods.<|reference_end|>
arxiv
@article{biswas2024tfs-nerf:, title={TFS-NeRF: Template-Free NeRF for Semantic 3D Reconstruction of Dynamic Scene}, author={Sandika Biswas, Qianyi Wu, Biplab Banerjee, Hamid Rezatofighi}, journal={arXiv preprint arXiv:2409.17459}, year={2024}, archivePrefix={arXiv}, eprint={2409.17459}, primaryClass={cs.CV} }
biswas2024tfs-nerf:
arxiv-662107
2409.17460
Towards More Relevant Product Search Ranking Via Large Language Models: An Empirical Study
<|reference_start|>Towards More Relevant Product Search Ranking Via Large Language Models: An Empirical Study: Training Learning-to-Rank models for e-commerce product search ranking can be challenging due to the lack of a gold standard of ranking relevance. In this paper, we decompose ranking relevance into content-based and engagement-based aspects, and we propose to leverage Large Language Models (LLMs) for both label and feature generation in model training, primarily aiming to improve the model's predictive capability for content-based relevance. Additionally, we introduce different sigmoid transformations on the LLM outputs to polarize relevance scores in labeling, enhancing the model's ability to balance content-based and engagement-based relevances and thus prioritize highly relevant items overall. Comprehensive online tests and offline evaluations are also conducted for the proposed design. Our work sheds light on advanced strategies for integrating LLMs into e-commerce product search ranking model training, offering a pathway to more effective and balanced models with improved ranking relevance.<|reference_end|>
arxiv
@article{liu2024towards, title={Towards More Relevant Product Search Ranking Via Large Language Models: An Empirical Study}, author={Qi Liu, Atul Singh, Jingbo Liu, Cun Mu, Zheng Yan}, journal={arXiv preprint arXiv:2409.17460}, year={2024}, archivePrefix={arXiv}, eprint={2409.17460}, primaryClass={cs.IR} }
liu2024towards
arxiv-662108
2409.17466
Adjusting Regression Models for Conditional Uncertainty Calibration
<|reference_start|>Adjusting Regression Models for Conditional Uncertainty Calibration: Conformal Prediction methods have finite-sample distribution-free marginal coverage guarantees. However, they generally do not offer conditional coverage guarantees, which can be important for high-stakes decisions. In this paper, we propose a novel algorithm to train a regression function to improve the conditional coverage after applying the split conformal prediction procedure. We establish an upper bound for the miscoverage gap between the conditional coverage and the nominal coverage rate and propose an end-to-end algorithm to control this upper bound. We demonstrate the efficacy of our method empirically on synthetic and real-world datasets.<|reference_end|>
arxiv
@article{gao2024adjusting, title={Adjusting Regression Models for Conditional Uncertainty Calibration}, author={Ruijiang Gao, Mingzhang Yin, James McInerney, Nathan Kallus}, journal={arXiv preprint arXiv:2409.17466}, year={2024}, archivePrefix={arXiv}, eprint={2409.17466}, primaryClass={stat.ML cs.AI cs.LG} }
gao2024adjusting
arxiv-662109
2409.17467
What is the social benefit of hate speech detection research? A Systematic Review
<|reference_start|>What is the social benefit of hate speech detection research? A Systematic Review: While NLP research into hate speech detection has grown exponentially in the last three decades, there has been minimal uptake or engagement from policy makers and non-profit organisations. We argue that the absence of ethical frameworks has contributed to this rift between current practice and best practice. By adopting appropriate ethical frameworks, NLP researchers may enable the social impact potential of hate speech research. This position paper is informed by reviewing forty-eight hate speech detection systems associated with thirty-seven publications from different venues.<|reference_end|>
arxiv
@article{wong2024what, title={What is the social benefit of hate speech detection research? A Systematic Review}, author={Sidney Gig-Jan Wong}, journal={arXiv preprint arXiv:2409.17467}, year={2024}, archivePrefix={arXiv}, eprint={2409.17467}, primaryClass={cs.CL} }
wong2024what
arxiv-662110
2409.17469
Verti-Selector: Automatic Curriculum Learning for Wheeled Mobility on Vertically Challenging Terrain
<|reference_start|>Verti-Selector: Automatic Curriculum Learning for Wheeled Mobility on Vertically Challenging Terrain: Reinforcement Learning (RL) has the potential to enable extreme off-road mobility by circumventing complex kinodynamic modeling, planning, and control by simulated end-to-end trial-and-error learning experiences. However, most RL methods are sample-inefficient when training in a large amount of manually designed simulation environments and struggle at generalizing to the real world. To address these issues, we introduce Verti-Selector (VS), an automatic curriculum learning framework designed to enhance learning efficiency and generalization by selectively sampling training terrain. VS prioritizes vertically challenging terrain with higher Temporal Difference (TD) errors when revisited, thereby allowing robots to learn at the edge of their evolving capabilities. By dynamically adjusting the sampling focus, VS significantly boosts sample efficiency and generalization within the VW-Chrono simulator built on the Chrono multi-physics engine. Furthermore, we provide simulation and physical results using VS on a Verti-4-Wheeler platform. These results demonstrate that VS can achieve 23.08% improvement in terms of success rate by efficiently sampling during training and robustly generalizing to the real world.<|reference_end|>
arxiv
@article{xu2024verti-selector:, title={Verti-Selector: Automatic Curriculum Learning for Wheeled Mobility on Vertically Challenging Terrain}, author={Tong Xu, Chenhui Pan, and Xuesu Xiao}, journal={arXiv preprint arXiv:2409.17469}, year={2024}, archivePrefix={arXiv}, eprint={2409.17469}, primaryClass={cs.RO} }
xu2024verti-selector:
arxiv-662111
2409.17470
Tactile Probabilistic Contact Dynamics Estimation of Unknown Objects
<|reference_start|>Tactile Probabilistic Contact Dynamics Estimation of Unknown Objects: We study the problem of rapidly identifying contact dynamics of unknown objects in partially known environments. The key innovation of our method is a novel formulation of the contact dynamics estimation problem as the joint estimation of contact geometries and physical parameters. We leverage DeepSDF, a compact and expressive neural-network-based geometry representation over a distribution of geometries, and adopt a particle filter to estimate both the geometries in contact and the physical parameters. In addition, we couple the estimator with an active exploration strategy that plans information-gathering moves to further expedite online estimation. Through simulation and physical experiments, we show that our method estimates accurate contact dynamics with fewer than 30 exploration moves for unknown objects touching partially known environments.<|reference_end|>
arxiv
@article{kim2024tactile, title={Tactile Probabilistic Contact Dynamics Estimation of Unknown Objects}, author={Jinhoo Kim, Yifan Zhu, Aaron Dollar}, journal={arXiv preprint arXiv:2409.17470}, year={2024}, archivePrefix={arXiv}, eprint={2409.17470}, primaryClass={cs.RO} }
kim2024tactile
arxiv-662112
2409.17472
Autoregressive Multi-trait Essay Scoring via Reinforcement Learning with Scoring-aware Multiple Rewards
<|reference_start|>Autoregressive Multi-trait Essay Scoring via Reinforcement Learning with Scoring-aware Multiple Rewards: Recent advances in automated essay scoring (AES) have shifted towards evaluating multiple traits to provide enriched feedback. Like typical AES systems, multi-trait AES employs the quadratic weighted kappa (QWK) to measure agreement with human raters, aligning closely with the rating schema; however, its non-differentiable nature prevents its direct use in neural network training. In this paper, we propose Scoring-aware Multi-reward Reinforcement Learning (SaMRL), which integrates actual evaluation schemes into the training process by designing QWK-based rewards with a mean-squared error penalty for multi-trait AES. Existing reinforcement learning (RL) applications in AES are limited to classification models despite associated performance degradation, as RL requires probability distributions; instead, we adopt an autoregressive score generation framework to leverage token generation probabilities for robust multi-trait score predictions. Empirical analyses demonstrate that SaMRL facilitates model training, notably enhancing scoring of previously inferior prompts.<|reference_end|>
arxiv
@article{do2024autoregressive, title={Autoregressive Multi-trait Essay Scoring via Reinforcement Learning with Scoring-aware Multiple Rewards}, author={Heejin Do, Sangwon Ryu, Gary Geunbae Lee}, journal={arXiv preprint arXiv:2409.17472}, year={2024}, archivePrefix={arXiv}, eprint={2409.17472}, primaryClass={cs.CL cs.AI} }
do2024autoregressive
arxiv-662113
2409.17473
An exploratory analysis of Community-based Question-Answering Platforms and GPT-3-driven Generative AI: Is it the end of online community-based learning?
<|reference_start|>An exploratory analysis of Community-based Question-Answering Platforms and GPT-3-driven Generative AI: Is it the end of online community-based learning?: Context: The advent of Large Language Model-driven tools like ChatGPT offers software engineers an interactive alternative to community question-answering (CQA) platforms like Stack Overflow. While Stack Overflow provides benefits from the accumulated crowd-sourced knowledge, it often suffers from unpleasant comments, reactions, and long waiting times. Objective: In this study, we assess the efficacy of ChatGPT in providing solutions to software engineering questions by analyzing its performance specifically against human solutions. Method: We empirically analyze 2564 Python and JavaScript questions from Stack Overflow that were asked between January 2022 and December 2022. We parse the questions and answers from Stack Overflow, then collect the answers to the same questions from ChatGPT through its API, and employ four textual and four cognitive metrics to compare the answers generated by ChatGPT with the accepted answers provided by human subject matter experts, to identify the potential reasons why future knowledge seekers may prefer ChatGPT over CQA platforms. We also measure the accuracy of the answers provided by ChatGPT, and we measure user interaction on Stack Overflow over the past two years using three metrics to determine how ChatGPT affects it. Results: Our analysis indicates that ChatGPT's responses are 66% shorter and share 35% more words with the questions, showing a 25% increase in positive sentiment compared to human responses. The accuracy rate of ChatGPT's answers is between 71% and 75%, with a variation in response characteristics between JavaScript and Python. Additionally, our findings suggest a recent 38% decrease in comment interactions on Stack Overflow, indicating a shift in community engagement patterns. A supplementary survey with 14 Python and JavaScript professionals validated these findings.<|reference_end|>
arxiv
@article{hasan2024an, title={An exploratory analysis of Community-based Question-Answering Platforms and GPT-3-driven Generative AI: Is it the end of online community-based learning?}, author={Mohammed Mehedi Hasan, Mahady Hasan, Mamun Bin Ibne Reaz, Jannat Un Nayeem Iqra}, journal={arXiv preprint arXiv:2409.17473}, year={2024}, archivePrefix={arXiv}, eprint={2409.17473}, primaryClass={cs.SE} }
hasan2024an
arxiv-662114
2409.17474
Reducing and Exploiting Data Augmentation Noise through Meta Reweighting Contrastive Learning for Text Classification
<|reference_start|>Reducing and Exploiting Data Augmentation Noise through Meta Reweighting Contrastive Learning for Text Classification: Data augmentation has shown its effectiveness in resolving the data-hungry problem and improving models' generalization ability. However, the quality of augmented data can vary, especially compared with the raw/original data. To boost deep learning models' performance given augmented data/samples in text classification tasks, we propose a novel framework, which leverages both meta learning and contrastive learning techniques as parts of our design for reweighting the augmented samples and refining their feature representations based on their quality. As part of the framework, we propose novel weight-dependent enqueue and dequeue algorithms to utilize augmented samples' weight/quality information effectively. Through experiments, we show that our framework can reasonably cooperate with existing deep learning models (e.g., RoBERTa-base and Text-CNN) and augmentation techniques (e.g., Wordnet and Easydata) for specific supervised learning tasks. Experiment results show that our framework achieves an average of 1.6% (up to 4.3%) absolute improvement on Text-CNN encoders and an average of 1.4% (up to 4.4%) absolute improvement on RoBERTa-base encoders on seven GLUE benchmark datasets compared with the best baseline. We present an in-depth analysis of our framework design, revealing the non-trivial contributions of our network components. Our code is publicly available for better reproducibility.<|reference_end|>
arxiv
@article{mou2024reducing, title={Reducing and Exploiting Data Augmentation Noise through Meta Reweighting Contrastive Learning for Text Classification}, author={Guanyi Mou, Yichuan Li, Kyumin Lee}, journal={IEEE BigData 2021}, year={2024}, doi={10.1109/BigData52589.2021.9671510}, archivePrefix={arXiv}, eprint={2409.17474}, primaryClass={cs.CL cs.LG} }
mou2024reducing
arxiv-662115
2409.17475
On the Impact of Feature Heterophily on Link Prediction with Graph Neural Networks
<|reference_start|>On the Impact of Feature Heterophily on Link Prediction with Graph Neural Networks: Heterophily, or the tendency of connected nodes in networks to have different class labels or dissimilar features, has been identified as challenging for many Graph Neural Network (GNN) models. While the challenges of applying GNNs for node classification when class labels display strong heterophily are well understood, it is unclear how heterophily affects GNN performance in other important graph learning tasks where class labels are not available. In this work, we focus on the link prediction task and systematically analyze the impact of heterophily in node features on GNN performance. Theoretically, we first introduce formal definitions of homophilic and heterophilic link prediction tasks, and present a theoretical framework that highlights the different optimizations needed for the respective tasks. We then analyze how different link prediction encoders and decoders adapt to varying levels of feature homophily and introduce designs for improved performance. Our empirical analysis on a variety of synthetic and real-world datasets confirms our theoretical insights and highlights the importance of adopting learnable decoders and GNN encoders with ego- and neighbor-embedding separation in message passing for link prediction tasks beyond homophily.<|reference_end|>
arxiv
@article{zhu2024on, title={On the Impact of Feature Heterophily on Link Prediction with Graph Neural Networks}, author={Jiong Zhu, Gaotang Li, Yao-An Yang, Jing Zhu, Xuehao Cui, Danai Koutra}, journal={arXiv preprint arXiv:2409.17475}, year={2024}, archivePrefix={arXiv}, eprint={2409.17475}, primaryClass={cs.LG} }
zhu2024on
arxiv-662116
2409.17476
Improving the Shortest Plank: Vulnerability-Aware Adversarial Training for Robust Recommender System
<|reference_start|>Improving the Shortest Plank: Vulnerability-Aware Adversarial Training for Robust Recommender System: Recommender systems play a pivotal role in mitigating information overload in various fields. Nonetheless, the inherent openness of these systems introduces vulnerabilities, allowing attackers to insert fake users into the system's training data to skew the exposure of certain items, known as poisoning attacks. Adversarial training has emerged as a notable defense mechanism against such poisoning attacks within recommender systems. Existing adversarial training methods apply perturbations of the same magnitude across all users to enhance system robustness against attacks. Yet, in reality, we find that attacks often affect only a subset of users who are vulnerable. These perturbations of indiscriminate magnitude make it difficult to balance effective protection for vulnerable users without degrading recommendation quality for those who are not affected. To address this issue, our research delves into understanding user vulnerability. Considering that poisoning attacks pollute the training data, we note that the higher degree to which a recommender system fits users' training data correlates with an increased likelihood of users incorporating attack information, indicating their vulnerability. Leveraging these insights, we introduce the Vulnerability-aware Adversarial Training (VAT), designed to defend against poisoning attacks in recommender systems. VAT employs a novel vulnerability-aware function to estimate users' vulnerability based on the degree to which the system fits them. Guided by this estimation, VAT applies perturbations of adaptive magnitude to each user, not only reducing the success ratio of attacks but also preserving, and potentially enhancing, the quality of recommendations. Comprehensive experiments confirm VAT's superior defensive capabilities across different recommendation models and against various types of attacks.<|reference_end|>
arxiv
@article{zhang2024improving, title={Improving the Shortest Plank: Vulnerability-Aware Adversarial Training for Robust Recommender System}, author={Kaike Zhang, Qi Cao, Yunfan Wu, Fei Sun, Huawei Shen, Xueqi Cheng}, journal={arXiv preprint arXiv:2409.17476}, year={2024}, doi={10.1145/3640457.3688120}, archivePrefix={arXiv}, eprint={2409.17476}, primaryClass={cs.IR} }
zhang2024improving
arxiv-662117
2409.17479
Traverse the Non-Traversable: Estimating Traversability for Wheeled Mobility on Vertically Challenging Terrain
<|reference_start|>Traverse the Non-Traversable: Estimating Traversability for Wheeled Mobility on Vertically Challenging Terrain: Most traversability estimation techniques divide off-road terrain into traversable (e.g., pavement, gravel, and grass) and non-traversable (e.g., boulders, vegetation, and ditches) regions and then inform subsequent planners to produce trajectories on the traversable part. However, recent research demonstrated that wheeled robots can traverse vertically challenging terrain (e.g., extremely rugged boulders comparable in size to the vehicles themselves), which unfortunately would be deemed as non-traversable by existing techniques. Motivated by such limitations, this work aims at identifying the traversable from the seemingly non-traversable, vertically challenging terrain based on past kinodynamic vehicle-terrain interactions in a data-driven manner. Our new Traverse the Non-Traversable(TNT) traversability estimator can efficiently guide a down-stream sampling-based planner containing a high-precision 6-DoF kinodynamic model, which becomes deployable onboard a small-scale vehicle. Additionally, the estimated traversability can also be used as a costmap to plan global and local paths without sampling. Our experiment results show that TNT can improve planning performance, efficiency, and stability by 50%, 26.7%, and 9.2% respectively on a physical robot platform.<|reference_end|>
arxiv
@article{pan2024traverse, title={Traverse the Non-Traversable: Estimating Traversability for Wheeled Mobility on Vertically Challenging Terrain}, author={Chenhui Pan, Aniket Datar, Anuj Pokhrel, Matthew Choulas, Mohammad Nazeri, and Xuesu Xiao}, journal={arXiv preprint arXiv:2409.17479}, year={2024}, archivePrefix={arXiv}, eprint={2409.17479}, primaryClass={cs.RO} }
pan2024traverse
arxiv-662118
2409.17480
What Would Happen Next? Predicting Consequences from An Event Causality Graph
<|reference_start|>What Would Happen Next? Predicting Consequences from An Event Causality Graph: The existing script event prediction task forecasts the subsequent event based on an event script chain. However, the evolution of historical events is more complicated in real-world scenarios, and the limited information provided by the event script chain also makes it difficult to accurately predict subsequent events. This paper introduces a Causality Graph Event Prediction (CGEP) task that forecasts a consequential event based on an Event Causality Graph (ECG). We propose a Semantic Enhanced Distance-sensitive Graph Prompt Learning (SeDGPL) Model for the CGEP task. In SeDGPL, (1) we design a Distance-sensitive Graph Linearization (DsGL) module to reformulate the ECG into a graph prompt template as the input of a PLM; (2) propose an Event-Enriched Causality Encoding (EeCE) module to integrate both event contextual semantics and graph schema information; (3) propose a Semantic Contrast Event Prediction (ScEP) module to enhance the event representation among numerous candidate events and predict the consequential event following the prompt learning paradigm. We construct two CGEP datasets based on the existing MAVEN-ERE and ESC corpora for experiments. Experiment results validate our argument that the proposed SeDGPL model outperforms advanced competitors on the CGEP task.<|reference_end|>
arxiv
@article{zhan2024what, title={What Would Happen Next? Predicting Consequences from An Event Causality Graph}, author={Chuanhong Zhan and Wei Xiang and Chao Liang and Bang Wang}, journal={arXiv preprint arXiv:2409.17480}, year={2024}, archivePrefix={arXiv}, eprint={2409.17480}, primaryClass={cs.AI} }
zhan2024what
arxiv-662119
2409.17481
MaskLLM: Learnable Semi-Structured Sparsity for Large Language Models
<|reference_start|>MaskLLM: Learnable Semi-Structured Sparsity for Large Language Models: Large Language Models (LLMs) are distinguished by their massive parameter counts, which typically result in significant redundancy. This work introduces MaskLLM, a learnable pruning method that establishes Semi-structured (or ``N:M'') Sparsity in LLMs, aimed at reducing computational overhead during inference. Instead of developing a new importance criterion, MaskLLM explicitly models N:M patterns as a learnable distribution through Gumbel Softmax sampling. This approach facilitates end-to-end training on large-scale datasets and offers two notable advantages: 1) High-quality Masks - our method effectively scales to large datasets and learns accurate masks; 2) Transferability - the probabilistic modeling of mask distribution enables the transfer learning of sparsity across domains or tasks. We assessed MaskLLM using 2:4 sparsity on various LLMs, including LLaMA-2, Nemotron-4, and GPT-3, with sizes ranging from 843M to 15B parameters, and our empirical results show substantial improvements over state-of-the-art methods. For instance, leading approaches achieve a perplexity (PPL) of 10 or greater on Wikitext compared to the dense model's 5.12 PPL, but MaskLLM achieves a significantly lower 6.72 PPL solely by learning the masks with frozen weights. Furthermore, MaskLLM's learnable nature allows customized masks for lossless application of 2:4 sparsity to downstream tasks or domains. Code is available at \url{https://github.com/NVlabs/MaskLLM}.<|reference_end|>
arxiv
@article{fang2024maskllm:, title={MaskLLM: Learnable Semi-Structured Sparsity for Large Language Models}, author={Gongfan Fang, Hongxu Yin, Saurav Muralidharan, Greg Heinrich, Jeff Pool, Jan Kautz, Pavlo Molchanov, Xinchao Wang}, journal={arXiv preprint arXiv:2409.17481}, year={2024}, archivePrefix={arXiv}, eprint={2409.17481}, primaryClass={cs.AI cs.CL cs.LG} }
fang2024maskllm:
arxiv-662120
2409.17483
Heterogeneous Hyper-Graph Neural Networks for Context-aware Human Activity Recognition
<|reference_start|>Heterogeneous Hyper-Graph Neural Networks for Context-aware Human Activity Recognition: Context-aware Human Activity Recognition (CHAR) is challenging due to the need to recognize the user's current activity from signals that vary significantly with contextual factors such as phone placements and the varied styles with which different users perform the same activity. In this paper, we argue that context-aware activity visit patterns in realistic in-the-wild data can equivalently be considered a general graph representation learning task. We posit that exploiting underlying graphical patterns in CHAR data can improve CHAR task performance and representation learning. Building on the intuition that certain activities are frequently performed with the phone placed in certain positions, we focus on the context-aware human activity problem of recognizing the <Activity, Phone Placement> tuple. We demonstrate that CHAR data has an underlying graph structure that can be viewed as a heterogeneous hypergraph that has multiple types of nodes and hyperedges (an edge connecting more than two nodes). Subsequently, learning <Activity, Phone Placement> representations becomes a graph node representation learning problem. After task transformation, we further propose a novel Heterogeneous HyperGraph Neural Network architecture for Context-aware Human Activity Recognition (HHGNN-CHAR), with three types of heterogeneous nodes (user, phone placement, and activity). Connections between all types of nodes are represented by hyperedges. Rigorous evaluation demonstrated that on an unscripted, in-the-wild CHAR dataset, our proposed framework significantly outperforms state-of-the-art (SOTA) baselines, including CHAR models that do not exploit graphs and GNN variants that do not incorporate heterogeneous nodes or hyperedges, with overall improvements of 14.04% on Matthews Correlation Coefficient (MCC) and 7.01% on Macro F1 scores.<|reference_end|>
arxiv
@article{ge2024heterogeneous, title={Heterogeneous Hyper-Graph Neural Networks for Context-aware Human Activity Recognition}, author={Wen Ge, Guanyi Mou, Emmanuel O. Agu, Kyumin Lee}, journal={PerCom 2023}, year={2024}, doi={10.1109/PerComWorkshops56833.2023.10150328}, archivePrefix={arXiv}, eprint={2409.17483}, primaryClass={cs.LG} }
ge2024heterogeneous
arxiv-662121
2409.17484
Crafting Synthetic Realities: Examining Visual Realism and Misinformation Potential of Photorealistic AI-Generated Images
<|reference_start|>Crafting Synthetic Realities: Examining Visual Realism and Misinformation Potential of Photorealistic AI-Generated Images: Advances in generative models have created Artificial Intelligence-Generated Images (AIGIs) nearly indistinguishable from real photographs. Leveraging a large corpus of 30,824 AIGIs collected from Instagram and Twitter, and combining quantitative content analysis with qualitative analysis, this study unpacks AI photorealism of AIGIs from four key dimensions, content, human, aesthetic, and production features. We find that photorealistic AIGIs often depict human figures, especially celebrities and politicians, with a high degree of surrealism and aesthetic professionalism, alongside a low degree of overt signals of AI production. This study is the first to empirically investigate photorealistic AIGIs across multiple platforms using a mixed-methods approach. Our findings provide important implications and insights for understanding visual misinformation and mitigating potential risks associated with photorealistic AIGIs. We also propose design recommendations to enhance the responsible use of AIGIs.<|reference_end|>
arxiv
@article{peng2024crafting, title={Crafting Synthetic Realities: Examining Visual Realism and Misinformation Potential of Photorealistic AI-Generated Images}, author={Qiyao Peng, Yingdan Lu, Yilang Peng, Sijia Qian, Xinyi Liu, Cuihua Shen}, journal={arXiv preprint arXiv:2409.17484}, year={2024}, archivePrefix={arXiv}, eprint={2409.17484}, primaryClass={cs.CY} }
peng2024crafting
arxiv-662122
2409.17485
Revisiting Deep Ensemble Uncertainty for Enhanced Medical Anomaly Detection
<|reference_start|>Revisiting Deep Ensemble Uncertainty for Enhanced Medical Anomaly Detection: Medical anomaly detection (AD) is crucial in pathological identification and localization. Current methods typically rely on uncertainty estimation in deep ensembles to detect anomalies, assuming that ensemble learners should agree on normal samples while exhibiting disagreement on unseen anomalies in the output space. However, these methods may suffer from inadequate disagreement on anomalies or diminished agreement on normal samples. To tackle these issues, we propose D2UE, a Diversified Dual-space Uncertainty Estimation framework for medical anomaly detection. To effectively balance agreement and disagreement for anomaly detection, we propose Redundancy-Aware Repulsion (RAR), which uses a similarity kernel that remains invariant to both isotropic scaling and orthogonal transformations, explicitly promoting diversity in learners' feature space. Moreover, to accentuate anomalous regions, we develop Dual-Space Uncertainty (DSU), which utilizes the ensemble's uncertainty in input and output spaces. In input space, we first calculate gradients of reconstruction error with respect to input images. The gradients are then integrated with reconstruction outputs to estimate uncertainty for inputs, enabling effective anomaly discrimination even when output space disagreement is minimal. We conduct a comprehensive evaluation of five medical benchmarks with different backbones. Experimental results demonstrate the superiority of our method to state-of-the-art methods and the effectiveness of each component in our framework. Our code is available at https://github.com/Rubiscol/D2UE.<|reference_end|>
arxiv
@article{gu2024revisiting, title={Revisiting Deep Ensemble Uncertainty for Enhanced Medical Anomaly Detection}, author={Yi Gu, Yi Lin, Kwang-Ting Cheng, Hao Chen}, journal={arXiv preprint arXiv:2409.17485}, year={2024}, archivePrefix={arXiv}, eprint={2409.17485}, primaryClass={cs.CV} }
gu2024revisiting
arxiv-662123
2409.17486
Global-Local Medical SAM Adaptor Based on Full Adaption
<|reference_start|>Global-Local Medical SAM Adaptor Based on Full Adaption: The emergence of visual language models, such as the Segment Anything Model (SAM), has brought great breakthroughs in the field of universal semantic segmentation and significantly aided improvements in medical image segmentation, in particular with the help of the Medical SAM adaptor (Med-SA). However, Med-SA can still be improved, as it fine-tunes SAM in a partial adaption manner. To resolve this problem, we present a novel global medical SAM adaptor (GMed-SA) with full adaption, which can adapt SAM globally. We further combine GMed-SA and Med-SA to propose a global-local medical SAM adaptor (GLMed-SA) to adapt SAM both globally and locally. Extensive experiments have been performed on the challenging public 2D melanoma segmentation dataset. The results show that GLMed-SA outperforms several state-of-the-art semantic segmentation methods on various evaluation metrics, demonstrating the superiority of our methods.<|reference_end|>
arxiv
@article{wang2024global-local, title={Global-Local Medical SAM Adaptor Based on Full Adaption}, author={Meng Wang (School of Electronic and Information Engineering Liaoning Technical University Xingcheng City, Liaoning Province, P. R. China), Yarong Feng (School of Electronic and Information Engineering Liaoning Technical University Xingcheng City, Liaoning Province, P. R. China), Yongwei Tang (School of Electronic and Information Engineering Liaoning Technical University Xingcheng City, Liaoning Province, P. R. China), Tian Zhang (Software college Northeastern University Shenyang, Liaoning Province, P. R. China), Yuxin Liang (School of Electronic and Information Engineering Liaoning Technical University Xingcheng City, Liaoning Province, P. R. China), Chao Lv (Department of General Surgery, Shengjing Hospital China Medical University Shenyang, Liaoning Province, P. R. China)}, journal={arXiv preprint arXiv:2409.17486}, year={2024}, archivePrefix={arXiv}, eprint={2409.17486}, primaryClass={cs.AI cs.CV} }
wang2024global-local
arxiv-662124
2409.17487
Learning Quantized Adaptive Conditions for Diffusion Models
<|reference_start|>Learning Quantized Adaptive Conditions for Diffusion Models: The curvature of ODE trajectories in diffusion models hinders their ability to generate high-quality images in a small number of function evaluations (NFE). In this paper, we propose a novel and effective approach to reduce trajectory curvature by utilizing adaptive conditions. By employing an extremely lightweight quantized encoder, our method incurs only an additional 1% of training parameters, eliminates the need for extra regularization terms, yet achieves significantly better sample quality. Our approach accelerates ODE sampling while preserving the downstream task image editing capabilities of SDE techniques. Extensive experiments verify that our method can generate high-quality results under extremely limited sampling costs. With only 6 NFE, we achieve 5.14 FID on CIFAR-10, 6.91 FID on FFHQ 64x64 and 3.10 FID on AFHQv2.<|reference_end|>
arxiv
@article{liang2024learning, title={Learning Quantized Adaptive Conditions for Diffusion Models}, author={Yuchen Liang, Yuchuan Tian, Lei Yu, Huao Tang, Jie Hu, Xiangzhong Fang and Hanting Chen}, journal={arXiv preprint arXiv:2409.17487}, year={2024}, archivePrefix={arXiv}, eprint={2409.17487}, primaryClass={cs.CV} }
liang2024learning
arxiv-662125
2409.17488
Optimal control of stochastic reaction networks with entropic control cost and emergence of mode-switching strategies
<|reference_start|>Optimal control of stochastic reaction networks with entropic control cost and emergence of mode-switching strategies: Controlling the stochastic dynamics of biological populations is a challenge that arises across various biological contexts. However, these dynamics are inherently nonlinear and involve a discrete state space, i.e., the number of molecules, cells, or organisms. Additionally, the possibility of extinction has a significant impact on both the dynamics and control strategies, particularly when the population size is small. These factors hamper the direct application of conventional control theories to biological systems. To address these challenges, we formulate the optimal control problem for stochastic population dynamics by utilizing a control cost function based on the Kullback-Leibler divergence. This approach naturally accounts for population-specific factors and simplifies the complex nonlinear Hamilton-Jacobi-Bellman equation into a linear form, facilitating efficient computation of optimal solutions. We demonstrate the effectiveness of our approach by applying it to the control of interacting random walkers, Moran processes, and SIR models, and observe the mode-switching phenomena in the control strategies. Our approach provides new opportunities for applying control theory to a wide range of biological problems.<|reference_end|>
arxiv
@article{horiguchi2024optimal, title={Optimal control of stochastic reaction networks with entropic control cost and emergence of mode-switching strategies}, author={Shuhei A. Horiguchi, Tetsuya J. Kobayashi}, journal={arXiv preprint arXiv:2409.17488}, year={2024}, archivePrefix={arXiv}, eprint={2409.17488}, primaryClass={q-bio.PE cs.SY eess.SY math.OC physics.bio-ph q-bio.MN} }
horiguchi2024optimal
arxiv-662126
2409.17490
MathDSL: A Domain-Specific Language for Concise Mathematical Solutions Via Program Synthesis
<|reference_start|>MathDSL: A Domain-Specific Language for Concise Mathematical Solutions Via Program Synthesis: We present MathDSL, a Domain-Specific Language (DSL) for mathematical equation solving, which, when deployed in program synthesis models, outperforms state-of-the-art reinforcement-learning-based methods. We also introduce a quantitative metric for measuring the conciseness of a mathematical solution and demonstrate the improvement in the quality of generated solutions compared to other methods. Our system demonstrates that a program synthesis system (DreamCoder) using MathDSL can generate programs that solve linear equations with greater accuracy and conciseness than using reinforcement learning systems. Additionally, we demonstrate that if we use the action spaces of previous reinforcement learning systems as DSLs, MathDSL outperforms the action-space-DSLs. We use DreamCoder to store equation-solving strategies as learned abstractions in its program library and demonstrate that by using MathDSL, these can be converted into human-interpretable solution strategies that could have applications in mathematical education.<|reference_end|>
arxiv
@article{anupam2024mathdsl:, title={MathDSL: A Domain-Specific Language for Concise Mathematical Solutions Via Program Synthesis}, author={Sagnik Anupam, Maddy Bowers, Omar Costilla-Reyes, Armando Solar-Lezama}, journal={arXiv preprint arXiv:2409.17490}, year={2024}, archivePrefix={arXiv}, eprint={2409.17490}, primaryClass={cs.LG} }
anupam2024mathdsl:
arxiv-662127
2409.17494
From Graphs to Words: A Computer-Assisted Framework for the Production of Accessible Text Descriptions
<|reference_start|>From Graphs to Words: A Computer-Assisted Framework for the Production of Accessible Text Descriptions: In the digital landscape, the ubiquity of data visualizations in media underscores the necessity for accessibility to ensure inclusivity for all users, including those with visual impairments. Current visual content often fails to cater to the needs of screen reader users due to the absence of comprehensive textual descriptions. To address this gap, we propose in this paper a framework designed to empower media content creators to transform charts into descriptive narratives. This tool not only facilitates the understanding of complex visual data through text but also fosters a broader awareness of accessibility in digital content creation. Through the application of this framework, users can interpret and convey the insights of data visualizations more effectively, accommodating a diverse audience. Our evaluations reveal that this tool not only enhances the comprehension of data visualizations but also promotes new perspectives on the represented data, thereby broadening the interpretative possibilities for all users.<|reference_end|>
arxiv
@article{xu2024from, title={From Graphs to Words: A Computer-Assisted Framework for the Production of Accessible Text Descriptions}, author={Qiang Xu, Thomas Hurtut}, journal={arXiv preprint arXiv:2409.17494}, year={2024}, archivePrefix={arXiv}, eprint={2409.17494}, primaryClass={cs.HC} }
xu2024from
arxiv-662128
2409.17495
Human Mobility Modeling with Limited Information via Large Language Models
<|reference_start|>Human Mobility Modeling with Limited Information via Large Language Models: Understanding human mobility patterns has traditionally been a complex challenge in transportation modeling. Due to the difficulties in obtaining high-quality training datasets across diverse locations, conventional activity-based models and learning-based human mobility modeling algorithms are particularly limited by the availability and quality of datasets. Furthermore, current research mainly focuses on the spatial-temporal travel pattern but lacks an understanding of the semantic information between activities, which is crucial for modeling the interdependence between activities. In this paper, we propose an innovative Large Language Model (LLM) empowered human mobility modeling framework. Our proposed approach significantly reduces the reliance on detailed human mobility statistical data, utilizing basic socio-demographic information of individuals to generate their daily mobility patterns. We have validated our results using the NHTS and SCAG-ABM datasets, demonstrating the effective modeling of mobility patterns and the strong adaptability of our framework across various geographic locations.<|reference_end|>
arxiv
@article{liu2024human, title={Human Mobility Modeling with Limited Information via Large Language Models}, author={Yifan Liu, Xishun Liao, Haoxuan Ma, Brian Yueshuai He, Chris Stanford, and Jiaqi Ma}, journal={arXiv preprint arXiv:2409.17495}, year={2024}, archivePrefix={arXiv}, eprint={2409.17495}, primaryClass={cs.AI cs.SI} }
liu2024human
arxiv-662129
2409.17496
Towards Forever Access for Implanted Brain-Computer Interfaces
<|reference_start|>Towards Forever Access for Implanted Brain-Computer Interfaces: Designs for implanted brain-computer interfaces (BCIs) have increased significantly in recent years. Each device promises better clinical outcomes and quality-of-life improvements, yet due to severe and inflexible safety constraints, progress requires tight co-design from materials to circuits and all the way up the stack to applications and algorithms. This trend has become more aggressive over time, forcing clinicians and patients to rely on vendor-specific hardware and software for deployment, maintenance, upgrades, and replacement. This over-reliance is ethically problematic, especially if companies go out-of-business or business objectives diverge from clinical promises. Device heterogeneity additionally burdens clinicians and healthcare facilities, adding complexity and costs for in-clinic visits, monitoring, and continuous access. Reliability, interoperability, portability, and future-proofed design is needed, but this unfortunately comes at a cost. These system features sap resources that would have otherwise been allocated to reduce power/energy and improve performance. Navigating this trade-off in a systematic way is critical to providing patients with forever access to their implants and reducing burdens placed on healthcare providers and caretakers. We study the integration of on-device storage to highlight the sensitivity of this trade-off and establish other points of interest within BCI design that require careful investigation. In the process, we revisit relevant problems in computer architecture and medical devices from the current era of hardware specialization and modern neurotechnology.<|reference_end|>
arxiv
@article{ugur2024towards, title={Towards Forever Access for Implanted Brain-Computer Interfaces}, author={Muhammed Ugur, Raghavendra Pradyumna Pothukuchi, Abhishek Bhattacharjee}, journal={The 1st Workshop on Hot Topics in Ethical Computer Systems, April, 2024}, year={2024}, archivePrefix={arXiv}, eprint={2409.17496}, primaryClass={cs.AR} }
ugur2024towards
arxiv-662130
2409.17497
Precise Interception Flight Targets by Image-based Visual Servoing of Multicopter
<|reference_start|>Precise Interception Flight Targets by Image-based Visual Servoing of Multicopter: Interception of low-altitude intruding targets with low-cost drones equipped with a strapdown camera presents a competitive option. However, the malicious maneuvers by the non-cooperative target and the coupling of the camera make the task challenging. To solve this problem, an Image-Based Visual Servoing (IBVS) control algorithm based on proportional navigation guidance with field-of-view holding capability is designed. The proposed controller reduces the miss distance while improving the stability of the visual servo system during interception. Software-in-the-loop (SITL) simulation experiments show a 72.8% reduction in the circular error probability (CEP) compared to the most recent study. This improvement enhances interception accuracy from the decimeter to the centimeter level. Real-world experiments further validate the effectiveness of the proposed algorithm.<|reference_end|>
arxiv
@article{yan2024precise, title={Precise Interception Flight Targets by Image-based Visual Servoing of Multicopter}, author={Hailong Yan, Kun Yang, Yixiao Cheng, Zihao Wang, and Dawei Li}, journal={arXiv preprint arXiv:2409.17497}, year={2024}, archivePrefix={arXiv}, eprint={2409.17497}, primaryClass={cs.RO} }
yan2024precise
arxiv-662131
2409.17499
Does Worst-Performing Agent Lead the Pack? Analyzing Agent Dynamics in Unified Distributed SGD
<|reference_start|>Does Worst-Performing Agent Lead the Pack? Analyzing Agent Dynamics in Unified Distributed SGD: Distributed learning is essential to train machine learning algorithms across heterogeneous agents while maintaining data privacy. We conduct an asymptotic analysis of Unified Distributed SGD (UD-SGD), exploring a variety of communication patterns, including decentralized SGD and local SGD within Federated Learning (FL), as well as the increasing communication interval in the FL setting. In this study, we assess how different sampling strategies, such as i.i.d. sampling, shuffling, and Markovian sampling, affect the convergence speed of UD-SGD by considering the impact of agent dynamics on the limiting covariance matrix as described in the Central Limit Theorem (CLT). Our findings not only support existing theories on linear speedup and asymptotic network independence, but also theoretically and empirically show how efficient sampling strategies employed by individual agents contribute to overall convergence in UD-SGD. Simulations reveal that a few agents using highly efficient sampling can achieve or surpass the performance of the majority employing moderately improved strategies, providing new insights beyond traditional analyses focusing on the worst-performing agent.<|reference_end|>
arxiv
@article{hu2024does, title={Does Worst-Performing Agent Lead the Pack? Analyzing Agent Dynamics in Unified Distributed SGD}, author={Jie Hu, Yi-Ting Ma, Do Young Eun}, journal={arXiv preprint arXiv:2409.17499}, year={2024}, archivePrefix={arXiv}, eprint={2409.17499}, primaryClass={cs.LG math.OC stat.ML} }
hu2024does
arxiv-662132
2409.17500
GLinSAT: The General Linear Satisfiability Neural Network Layer By Accelerated Gradient Descent
<|reference_start|>GLinSAT: The General Linear Satisfiability Neural Network Layer By Accelerated Gradient Descent: Ensuring that the outputs of neural networks satisfy specific constraints is crucial for applying neural networks to real-life decision-making problems. In this paper, we consider making a batch of neural network outputs satisfy bounded and general linear constraints. We first reformulate the neural network output projection problem as an entropy-regularized linear programming problem. We show that such a problem can be equivalently transformed into an unconstrained convex optimization problem with Lipschitz continuous gradient according to the duality theorem. Then, based on an accelerated gradient descent algorithm with numerical performance enhancement, we present our architecture, GLinSAT, to solve the problem. To the best of our knowledge, this is the first general linear satisfiability layer in which all the operations are differentiable and matrix-factorization-free. Despite the fact that we can explicitly perform backpropagation based on automatic differentiation mechanism, we also provide an alternative approach in GLinSAT to calculate the derivatives based on implicit differentiation of the optimality condition. Experimental results on constrained traveling salesman problems, partial graph matching with outliers, predictive portfolio allocation and power system unit commitment demonstrate the advantages of GLinSAT over existing satisfiability layers.<|reference_end|>
arxiv
@article{zeng2024glinsat:, title={GLinSAT: The General Linear Satisfiability Neural Network Layer By Accelerated Gradient Descent}, author={Hongtai Zeng, Chao Yang, Yanzhen Zhou, Cheng Yang, Qinglai Guo}, journal={arXiv preprint arXiv:2409.17500}, year={2024}, archivePrefix={arXiv}, eprint={2409.17500}, primaryClass={cs.AI cs.SY eess.SY math.OC} }
zeng2024glinsat:
arxiv-662133
2409.17502
Broadcast Product: Shape-aligned Element-wise Multiplication and Beyond
<|reference_start|>Broadcast Product: Shape-aligned Element-wise Multiplication and Beyond: We propose a new operator defined between two tensors, the broadcast product. The broadcast product calculates the Hadamard product after duplicating elements to align the shapes of the two tensors. Complex tensor operations in libraries like numpy can be succinctly represented as mathematical expressions using the broadcast product. Finally, we propose a novel tensor decomposition using the broadcast product, highlighting its potential applications in dimensionality reduction.<|reference_end|>
arxiv
@article{matsui2024broadcast, title={Broadcast Product: Shape-aligned Element-wise Multiplication and Beyond}, author={Yusuke Matsui and Tatsuya Yokota}, journal={arXiv preprint arXiv:2409.17502}, year={2024}, archivePrefix={arXiv}, eprint={2409.17502}, primaryClass={cs.LG} }
matsui2024broadcast
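To make the operator above concrete, here is a minimal Python/numpy sketch of a broadcast product as described in that abstract: elements are duplicated so the two shapes align, then a Hadamard (element-wise) product is taken. The function name `broadcast_product` and the reliance on numpy's own broadcasting rules are illustrative assumptions; the paper's exact alignment rules and its proposed tensor decomposition are not reproduced here.

```python
import numpy as np

def broadcast_product(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Hadamard product after duplicating elements so the two shapes align.

    This sketch assumes numpy-style broadcasting: size-1 axes (and missing
    leading axes) are tiled to match the other operand before the
    element-wise multiplication is applied.
    """
    # np.broadcast_arrays materializes the duplicated views explicitly,
    # making the "duplicate, then multiply" definition visible.
    a_dup, b_dup = np.broadcast_arrays(a, b)
    return a_dup * b_dup

# Usage: a (3, 1) column times a (1, 4) row yields a (3, 4) matrix.
x = np.arange(3).reshape(3, 1)
y = np.arange(4).reshape(1, 4)
print(broadcast_product(x, y).shape)  # (3, 4)
```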
arxiv-662134
2409.17503
Shape-intensity knowledge distillation for robust medical image segmentation
<|reference_start|>Shape-intensity knowledge distillation for robust medical image segmentation: Many medical image segmentation methods have achieved impressive results. Yet, most existing methods do not take into account the shape-intensity prior information. This may lead to implausible segmentation results, in particular for images of unseen datasets. In this paper, we propose a novel approach to incorporate joint shape-intensity prior information into the segmentation network. Specifically, we first train a segmentation network (regarded as the teacher network) on class-wise averaged training images to extract valuable shape-intensity information, which is then transferred to a student segmentation network with the same network architecture as the teacher via knowledge distillation. In this way, the student network regarded as the final segmentation model can effectively integrate the shape-intensity prior information, yielding more accurate segmentation results. Despite its simplicity, experiments on five medical image segmentation tasks of different modalities demonstrate that the proposed Shape-Intensity Knowledge Distillation (SIKD) consistently improves several baseline models (including recent MaxStyle and SAMed) under intra-dataset evaluation, and significantly improves the cross-dataset generalization ability. The code is available at https://github.com/whdong-whu/SIKD.<|reference_end|>
arxiv
@article{dong2024shape-intensity, title={Shape-intensity knowledge distillation for robust medical image segmentation}, author={Wenhui Dong, Bo Du, Yongchao Xu}, journal={arXiv preprint arXiv:2409.17503}, year={2024}, archivePrefix={arXiv}, eprint={2409.17503}, primaryClass={eess.IV cs.CV} }
dong2024shape-intensity
arxiv-662135
2409.17504
HaloScope: Harnessing Unlabeled LLM Generations for Hallucination Detection
<|reference_start|>HaloScope: Harnessing Unlabeled LLM Generations for Hallucination Detection: The surge in applications of large language models (LLMs) has prompted concerns about the generation of misleading or fabricated information, known as hallucinations. Therefore, detecting hallucinations has become critical to maintaining trust in LLM-generated content. A primary challenge in learning a truthfulness classifier is the lack of a large amount of labeled truthful and hallucinated data. To address the challenge, we introduce HaloScope, a novel learning framework that leverages the unlabeled LLM generations in the wild for hallucination detection. Such unlabeled data arises freely upon deploying LLMs in the open world, and consists of both truthful and hallucinated information. To harness the unlabeled data, we present an automated membership estimation score for distinguishing between truthful and untruthful generations within unlabeled mixture data, thereby enabling the training of a binary truthfulness classifier on top. Importantly, our framework does not require extra data collection and human annotations, offering strong flexibility and practicality for real-world applications. Extensive experiments show that HaloScope can achieve superior hallucination detection performance, outperforming the competitive rivals by a significant margin. Code is available at https://github.com/deeplearningwisc/haloscope.<|reference_end|>
arxiv
@article{du2024haloscope:, title={HaloScope: Harnessing Unlabeled LLM Generations for Hallucination Detection}, author={Xuefeng Du, Chaowei Xiao, Yixuan Li}, journal={arXiv preprint arXiv:2409.17504}, year={2024}, archivePrefix={arXiv}, eprint={2409.17504}, primaryClass={cs.LG cs.CL} }
du2024haloscope:
arxiv-662136
2409.17505
Sequential Kernelized Stein Discrepancy
<|reference_start|>Sequential Kernelized Stein Discrepancy: We present a sequential version of the kernelized Stein discrepancy, which allows for conducting goodness-of-fit tests for unnormalized densities that are continuously monitored and adaptively stopped. That is, the sample size need not be fixed prior to data collection; the practitioner can choose whether to stop the test or continue to gather evidence at any time while controlling the false discovery rate. In stark contrast to related literature, we do not impose uniform boundedness on the Stein kernel. Instead, we exploit the potential boundedness of the Stein kernel at arbitrary point evaluations to define test martingales, that give way to the subsequent novel sequential tests. We prove the validity of the test, as well as an asymptotic lower bound for the logarithmic growth of the wealth process under the alternative. We further illustrate the empirical performance of the test with a variety of distributions, including restricted Boltzmann machines.<|reference_end|>
arxiv
@article{martinez-taboada2024sequential, title={Sequential Kernelized Stein Discrepancy}, author={Diego Martinez-Taboada, Aaditya Ramdas}, journal={arXiv preprint arXiv:2409.17505}, year={2024}, archivePrefix={arXiv}, eprint={2409.17505}, primaryClass={stat.ML cs.LG} }
martinez-taboada2024sequential
arxiv-662137
2409.17506
Optimizing Resource Allocation for Multi-modal Semantic Communication in Mobile AIGC Networks: A Diffusion-based Game Approach
<|reference_start|>Optimizing Resource Allocation for Multi-modal Semantic Communication in Mobile AIGC Networks: A Diffusion-based Game Approach: Mobile Artificial Intelligence-Generated Content (AIGC) networks enable massive users to obtain customized content generation services. However, users still need to download a large number of AIGC outputs from mobile AIGC service providers, which strains communication resources and increases the risk of transmission failures. Fortunately, Semantic Communication (SemCom) can improve transmission efficiency and reliability through semantic information processing. Moreover, recent advances in Generative Artificial Intelligence (GAI) have further enhanced the effectiveness of SemCom through its powerful generative capabilities. However, how to strike a balance between high-quality content generation and the size of semantic information transmitted is a major challenge. In this paper, we propose a Generative Diffusion Model (GDM)-based multi-modal SemCom (GM-SemCom) framework. The framework improves the accuracy of information reconstruction by integrating GDMs and multi-modal semantic information and adopts a controllable extraction module to address the problems of unstable data recovery and slow decoding speed in GAI-enabled SemCom. Then, we introduce a novel metric called Age of Semantic Information (AoSI) based on the concept of Age of Information (AoI) to quantify the freshness of semantic information. To address the resource trading problem within the framework, we propose a Stackelberg game model, which integrates the AoSI with psychological factors to provide a comprehensive measure of user utility. Furthermore, we propose a GDM-based algorithm to solve the game under incomplete information. Compared with the traditional deep reinforcement learning algorithms, numerical results demonstrate that the proposed algorithm converges faster and is closer to the Stackelberg equilibrium.<|reference_end|>
arxiv
@article{liu2024optimizing, title={Optimizing Resource Allocation for Multi-modal Semantic Communication in Mobile AIGC Networks: A Diffusion-based Game Approach}, author={Jian Liu, Ming Xiao, Jinbo Wen, Jiawen Kang, Ruichen Zhang, Tao Zhang, Dusit Niyato, Weiting Zhang, Ying Liu}, journal={arXiv preprint arXiv:2409.17506}, year={2024}, archivePrefix={arXiv}, eprint={2409.17506}, primaryClass={cs.NI} }
liu2024optimizing
arxiv-662138
2409.17508
Uni-Med: A Unified Medical Generalist Foundation Model For Multi-Task Learning Via Connector-MoE
<|reference_start|>Uni-Med: A Unified Medical Generalist Foundation Model For Multi-Task Learning Via Connector-MoE: Multi-modal large language models (MLLMs) have shown impressive capabilities as a general-purpose interface for various visual and linguistic tasks. However, building a unified MLLM for multi-task learning in the medical field remains a thorny challenge. To mitigate the tug-of-war problem of multi-modal multi-task optimization, recent advances primarily focus on improving the LLM components, while neglecting the connector that bridges the gap between modalities. In this paper, we introduce Uni-Med, a novel medical generalist foundation model which consists of a universal visual feature extraction module, a connector mixture-of-experts (CMoE) module, and an LLM. Benefiting from the proposed CMoE that leverages a well-designed router with a mixture of projection experts at the connector, Uni-Med achieves efficient solution to the tug-of-war problem and can perform six different medical tasks including question answering, visual question answering, report generation, referring expression comprehension, referring expression generation and image classification. To the best of our knowledge, Uni-Med is the first effort to tackle multi-task interference at the connector. Extensive ablation experiments validate the effectiveness of introducing CMoE under any configuration, with up to an average 8% performance gains. We further provide interpretation analysis of the tug-of-war problem from the perspective of gradient optimization and parameter statistics. Compared to previous state-of-the-art medical MLLMs, Uni-Med achieves competitive or superior evaluation metrics on diverse tasks. Code, data and model will be soon available at GitHub.<|reference_end|>
arxiv
@article{zhu2024uni-med:, title={Uni-Med: A Unified Medical Generalist Foundation Model For Multi-Task Learning Via Connector-MoE}, author={Xun Zhu, Ying Hu, Fanbin Mo, Miao Li, Ji Wu}, journal={arXiv preprint arXiv:2409.17508}, year={2024}, archivePrefix={arXiv}, eprint={2409.17508}, primaryClass={cs.CV cs.AI cs.LG} }
zhu2024uni-med:
arxiv-662139
2409.17509
BioZero: An Efficient and Privacy-Preserving Decentralized Biometric Authentication Protocol on Open Blockchain
<|reference_start|>BioZero: An Efficient and Privacy-Preserving Decentralized Biometric Authentication Protocol on Open Blockchain: Digital identity plays a vital role in enabling secure access to resources and services in the digital world. Traditional identity authentication methods, such as password-based and biometric authentications, have limitations in terms of security, privacy, and scalability. Decentralized authentication approaches leveraging blockchain technology have emerged as a promising solution. However, existing decentralized authentication methods often rely on indirect identity verification (e.g. using passwords or digital signatures as authentication credentials) and face challenges such as Sybil attacks. In this paper, we propose BioZero, an efficient and privacy-preserving decentralized biometric authentication protocol that can be implemented on open blockchain. BioZero leverages Pedersen commitment and homomorphic computation to protect user biometric privacy while enabling efficient verification. We enhance the protocol with non-interactive homomorphic computation and employ zero-knowledge proofs for secure on-chain verification. The unique aspect of BioZero is that it is fully decentralized and can be executed by blockchain smart contracts in a very efficient way. We analyze the security of BioZero and validate its performance through a prototype implementation. The results demonstrate the effectiveness, efficiency, and security of BioZero in decentralized authentication scenarios. Our work contributes to the advancement of decentralized identity authentication using biometrics.<|reference_end|>
arxiv
@article{lai2024biozero:, title={BioZero: An Efficient and Privacy-Preserving Decentralized Biometric Authentication Protocol on Open Blockchain}, author={Junhao Lai, Taotao Wang, Shengli Zhang, Qing Yang, and Soung Chang Liew}, journal={arXiv preprint arXiv:2409.17509}, year={2024}, archivePrefix={arXiv}, eprint={2409.17509}, primaryClass={cs.CR} }
lai2024biozero:
arxiv-662140
2409.17510
NeuroPath: A Neural Pathway Transformer for Joining the Dots of Human Connectomes
<|reference_start|>NeuroPath: A Neural Pathway Transformer for Joining the Dots of Human Connectomes: Although modern imaging technologies allow us to study connectivity between two distinct brain regions in-vivo, an in-depth understanding of how anatomical structure supports brain function and how spontaneous functional fluctuations give rise to remarkable cognition is still elusive. Meanwhile, tremendous efforts have been made in the realm of machine learning to establish the nonlinear mapping between neuroimaging data and phenotypic traits. However, the absence of neuroscience insight in the current approaches poses significant challenges in understanding cognitive behavior from transient neural activities. To address this challenge, we put the spotlight on the coupling mechanism of structural connectivity (SC) and functional connectivity (FC) by formulating this network neuroscience question as an expressive graph representation learning problem for high-order topology. Specifically, we introduce the concept of topological detour to characterize how a ubiquitous instance of FC (direct link) is supported by neural pathways (detour) physically wired by SC, which forms a cyclic loop shaped by the interaction of brain structure and function. In the parlance of machine learning, the multi-hop detour pathway underlying SC-FC coupling allows us to devise a novel multi-head self-attention mechanism within Transformer to capture multi-modal feature representation from paired graphs of SC and FC. Taken together, we propose a biologically-inspired deep model, coined as NeuroPath, to find putative connectomic feature representations from the unprecedented amount of neuroimages, which can be plugged into various downstream applications such as task recognition and disease diagnosis. We have evaluated NeuroPath on large-scale public datasets including HCP and UK Biobank under supervised and zero-shot learning, where the state-of-the-art performance by our NeuroPath indicates great potential in network neuroscience.<|reference_end|>
arxiv
@article{wei2024neuropath:, title={NeuroPath: A Neural Pathway Transformer for Joining the Dots of Human Connectomes}, author={Ziquan Wei, Tingting Dan, Jiaqi Ding, Guorong Wu}, journal={arXiv preprint arXiv:2409.17510}, year={2024}, archivePrefix={arXiv}, eprint={2409.17510}, primaryClass={q-bio.NC cs.AI cs.CV cs.LG} }
wei2024neuropath:
arxiv-662141
2409.17512
SCOMatch: Alleviating Overtrusting in Open-set Semi-supervised Learning
<|reference_start|>SCOMatch: Alleviating Overtrusting in Open-set Semi-supervised Learning: Open-set semi-supervised learning (OSSL) leverages practical open-set unlabeled data, comprising both in-distribution (ID) samples from seen classes and out-of-distribution (OOD) samples from unseen classes, for semi-supervised learning (SSL). Prior OSSL methods initially learned the decision boundary between ID and OOD with labeled ID data, subsequently employing self-training to refine this boundary. These methods, however, suffer from the tendency to overtrust the labeled ID data: the scarcity of labeled data caused the distribution bias between the labeled samples and the entire ID data, which misleads the decision boundary to overfit. The subsequent self-training process, based on the overfitted result, fails to rectify this problem. In this paper, we address the overtrusting issue by treating OOD samples as an additional class, forming a new SSL process. Specifically, we propose SCOMatch, a novel OSSL method that 1) selects reliable OOD samples as new labeled data with an OOD memory queue and a corresponding update strategy and 2) integrates the new SSL process into the original task through our Simultaneous Close-set and Open-set self-training. SCOMatch refines the decision boundary of ID and OOD classes across the entire dataset, thereby leading to improved results. Extensive experimental results show that SCOMatch significantly outperforms the state-of-the-art methods on various benchmarks. The effectiveness is further verified through ablation studies and visualization.<|reference_end|>
arxiv
@article{wang2024scomatch:, title={SCOMatch: Alleviating Overtrusting in Open-set Semi-supervised Learning}, author={Zerun Wang, Liuyu Xiang, Lang Huang, Jiafeng Mao, Ling Xiao, Toshihiko Yamasaki}, journal={arXiv preprint arXiv:2409.17512}, year={2024}, archivePrefix={arXiv}, eprint={2409.17512}, primaryClass={cs.CV} }
wang2024scomatch:
arxiv-662142
2409.17513
Comparing Unidirectional, Bidirectional, and Word2vec Models for Discovering Vulnerabilities in Compiled Lifted Code
<|reference_start|>Comparing Unidirectional, Bidirectional, and Word2vec Models for Discovering Vulnerabilities in Compiled Lifted Code: Ransomware and other forms of malware cause significant financial and operational damage to organizations by exploiting long-standing and often difficult-to-detect software vulnerabilities. To detect vulnerabilities such as buffer overflows in compiled code, this research investigates the application of unidirectional transformer-based embeddings, specifically GPT-2. Using a dataset of LLVM functions, we trained a GPT-2 model to generate embeddings, which were subsequently used to build LSTM neural networks to differentiate between vulnerable and non-vulnerable code. Our study reveals that embeddings from the GPT-2 model significantly outperform those from bidirectional models of BERT and RoBERTa, achieving an accuracy of 92.5% and an F1-score of 89.7%. LSTM neural networks were developed with both frozen and unfrozen embedding model layers. The model with the highest performance was achieved when the embedding layers were unfrozen. Further, the research finds that, in exploring the impact of different optimizers within this domain, the SGD optimizer demonstrates superior performance over Adam. Overall, these findings reveal important insights into the potential of unidirectional transformer-based approaches in enhancing cybersecurity defenses.<|reference_end|>
arxiv
@article{mccully2024comparing, title={Comparing Unidirectional, Bidirectional, and Word2vec Models for Discovering Vulnerabilities in Compiled Lifted Code}, author={Gary A. McCully, John D. Hastings, Shengjie Xu, Adam Fortier}, journal={arXiv preprint arXiv:2409.17513}, year={2024}, archivePrefix={arXiv}, eprint={2409.17513}, primaryClass={cs.CR cs.CL cs.LG cs.SE} }
mccully2024comparing
arxiv-662143
2409.17515
From News to Forecast: Integrating Event Analysis in LLM-Based Time Series Forecasting with Reflection
<|reference_start|>From News to Forecast: Integrating Event Analysis in LLM-Based Time Series Forecasting with Reflection: This paper introduces a novel approach that leverages Large Language Models (LLMs) and Generative Agents to enhance time series forecasting by reasoning across both text and time series data. With language as a medium, our method adaptively integrates social events into forecasting models, aligning news content with time series fluctuations to provide richer insights. Specifically, we utilize LLM-based agents to iteratively filter out irrelevant news and employ human-like reasoning to evaluate predictions. This enables the model to analyze complex events, such as unexpected incidents and shifts in social behavior, and continuously refine the selection logic of news and the robustness of the agent's output. By integrating selected news events with time series data, we fine-tune a pre-trained LLM to predict sequences of digits in time series. The results demonstrate significant improvements in forecasting accuracy, suggesting a potential paradigm shift in time series forecasting through the effective utilization of unstructured news data.<|reference_end|>
arxiv
@article{wang2024from, title={From News to Forecast: Integrating Event Analysis in LLM-Based Time Series Forecasting with Reflection}, author={Xinlei Wang, Maike Feng, Jing Qiu, Jinjin Gu, Junhua Zhao}, journal={arXiv preprint arXiv:2409.17515}, year={2024}, archivePrefix={arXiv}, eprint={2409.17515}, primaryClass={cs.AI} }
wang2024from
arxiv-662144
2409.17516
Functional Classification of Spiking Signal Data Using Artificial Intelligence Techniques: A Review
<|reference_start|>Functional Classification of Spiking Signal Data Using Artificial Intelligence Techniques: A Review: Human brain neuron activities are of great research interest nowadays. Neuronal behavior is assessed by analyzing signal data such as electroencephalography (EEG), which can offer scientists valuable information about diseases and human-computer interaction. One of the difficulties researchers confront while evaluating these signals is the existence of large volumes of spike data. Spikes are prominent segments of signal data that can arise from vital biomarkers or from physical issues such as electrode movements. Hence, distinguishing between types of spikes is important, and this is where the concept of spike classification begins. Previously, researchers classified spikes manually. Manual classification was not precise enough, as it involved extensive analysis. Consequently, Artificial Intelligence (AI) was introduced into neuroscience to assist clinicians in classifying spikes correctly. This review discusses the importance and use of AI in spike classification, focusing on the recognition of neural activity noises. The task is divided into three main components: preprocessing, classification, and evaluation. Existing methods are introduced and their importance is determined. The review also highlights the need for more efficient algorithms. The primary goal is to provide a perspective on spike classification for future research and provide a comprehensive understanding of the methodologies and issues involved. The review organizes materials in the spike classification field for future studies. In this work, numerous studies were extracted from different databases. The PRISMA-related research guidelines were then used to choose papers. Then, research studies based on spike classification using machine learning and deep learning approaches with effective preprocessing were selected.<|reference_end|>
arxiv
@article{sharifrazi2024functional, title={Functional Classification of Spiking Signal Data Using Artificial Intelligence Techniques: A Review}, author={Danial Sharifrazi, Nouman Javed, Javad Hassannataj Joloudari, Roohallah Alizadehsani, Prasad N. Paradkar, Ru-San Tan, U. Rajendra Acharya, Asim Bhatti}, journal={arXiv preprint arXiv:2409.17516}, year={2024}, archivePrefix={arXiv}, eprint={2409.17516}, primaryClass={cs.AI cs.LG q-bio.NC} }
sharifrazi2024functional
arxiv-662145
2409.17517
Dataset Distillation-based Hybrid Federated Learning on Non-IID Data
<|reference_start|>Dataset Distillation-based Hybrid Federated Learning on Non-IID Data: In federated learning, the heterogeneity of client data has a great impact on the performance of model training. Many heterogeneity issues in this process arise from non-independently and identically distributed (Non-IID) data. This study focuses on the issue of label distribution skew. To address it, we propose a hybrid federated learning framework called HFLDD, which integrates dataset distillation to generate approximately independent and identically distributed (IID) data, thereby improving the performance of model training. Particularly, we partition the clients into heterogeneous clusters, where the data labels among different clients within a cluster are unbalanced while the data labels among different clusters are balanced. The cluster headers collect distilled data from the corresponding cluster members, and conduct model training in collaboration with the server. This training process is like traditional federated learning on IID data, and hence effectively alleviates the impact of Non-IID data on model training. Furthermore, we compare our proposed method with typical baseline methods on public datasets. Experimental results demonstrate that when the data labels are severely imbalanced, the proposed HFLDD outperforms the baseline methods in terms of both test accuracy and communication cost.<|reference_end|>
arxiv
@article{shi2024dataset, title={Dataset Distillation-based Hybrid Federated Learning on Non-IID Data}, author={Xiufang Shi, Wei Zhang, Mincheng Wu, Guangyi Liu, Zhenyu Wen, Shibo He, Tejal Shah, Rajiv Ranjan}, journal={arXiv preprint arXiv:2409.17517}, year={2024}, archivePrefix={arXiv}, eprint={2409.17517}, primaryClass={cs.LG cs.AI} }
shi2024dataset
arxiv-662146
2409.17518
Multi-Designated Detector Watermarking for Language Models
<|reference_start|>Multi-Designated Detector Watermarking for Language Models: In this paper, we initiate the study of \emph{multi-designated detector watermarking (MDDW)} for large language models (LLMs). This technique allows model providers to generate watermarked outputs from LLMs with two key properties: (i) only specific, possibly multiple, designated detectors can identify the watermarks, and (ii) there is no perceptible degradation in the output quality for ordinary users. We formalize the security definitions for MDDW and present a framework for constructing MDDW for any LLM using multi-designated verifier signatures (MDVS). Recognizing the significant economic value of LLM outputs, we introduce claimability as an optional security feature for MDDW, enabling model providers to assert ownership of LLM outputs within designated-detector settings. To support claimable MDDW, we propose a generic transformation converting any MDVS to a claimable MDVS. Our implementation of the MDDW scheme highlights its advanced functionalities and flexibility over existing methods, with satisfactory performance metrics.<|reference_end|>
arxiv
@article{huang2024multi-designated, title={Multi-Designated Detector Watermarking for Language Models}, author={Zhengan Huang, Gongxian Zeng, Xin Mu, Yu Wang, Yue Yu}, journal={arXiv preprint arXiv:2409.17518}, year={2024}, archivePrefix={arXiv}, eprint={2409.17518}, primaryClass={cs.CR cs.AI} }
huang2024multi-designated
arxiv-662147
2409.17519
Robotic Environmental State Recognition with Pre-Trained Vision-Language Models and Black-Box Optimization
<|reference_start|>Robotic Environmental State Recognition with Pre-Trained Vision-Language Models and Black-Box Optimization: In order for robots to autonomously navigate and operate in diverse environments, it is essential for them to recognize the state of their environment. However, environmental state recognition has traditionally involved distinct methods tailored to each state to be recognized. In this study, we perform a unified environmental state recognition for robots through spoken language with pre-trained large-scale vision-language models. We apply Visual Question Answering and Image-to-Text Retrieval, which are tasks of Vision-Language Models. We show that with our method, it is possible to recognize not only whether a room door is open/closed, but also whether a transparent door is open/closed and whether water is running in a sink, without training neural networks or manual programming. In addition, the recognition accuracy can be improved by selecting appropriate texts from the set of prepared texts based on black-box optimization. For each state recognition, only the text set and its weighting need to be changed, eliminating the need to prepare multiple different models and programs, and facilitating the management of source code and computer resources. We experimentally demonstrate the effectiveness of our method and apply it to the recognition behavior on a mobile robot, Fetch.<|reference_end|>
arxiv
@article{kawaharazuka2024robotic, title={Robotic Environmental State Recognition with Pre-Trained Vision-Language Models and Black-Box Optimization}, author={Kento Kawaharazuka, Yoshiki Obinata, Naoaki Kanazawa, Kei Okada, and Masayuki Inaba}, journal={arXiv preprint arXiv:2409.17519}, year={2024}, doi={10.1080/01691864.2024.2366995}, archivePrefix={arXiv}, eprint={2409.17519}, primaryClass={cs.RO cs.AI cs.CV} }
kawaharazuka2024robotic
arxiv-662148
2409.17523
EAGLE: Egocentric AGgregated Language-video Engine
<|reference_start|>EAGLE: Egocentric AGgregated Language-video Engine: The rapid evolution of egocentric video analysis brings new insights into understanding human activities and intentions from a first-person perspective. Despite this progress, the fragmentation in tasks like action recognition, procedure learning, and moment retrieval, etc., coupled with inconsistent annotations and isolated model development, hinders a holistic interpretation of video content. In response, we introduce the EAGLE (Egocentric AGgregated Language-video Engine) model and the EAGLE-400K dataset to provide a unified framework that integrates various egocentric video understanding tasks. EAGLE-400K, the first large-scale instruction-tuning dataset tailored for egocentric video, features 400K diverse samples to enhance a broad spectrum of tasks from activity recognition to procedure knowledge learning. Moreover, EAGLE, a strong video multimodal large language model (MLLM), is designed to effectively capture both spatial and temporal information. In addition, we propose a set of evaluation metrics designed to facilitate a thorough assessment of MLLM for egocentric video understanding. Our extensive experiments demonstrate EAGLE's superior performance over existing models, highlighting its ability to balance task-specific understanding with holistic video interpretation. With EAGLE, we aim to pave the way for research opportunities and practical applications in real-world scenarios.<|reference_end|>
arxiv
@article{bi2024eagle:, title={EAGLE: Egocentric AGgregated Language-video Engine}, author={Jing Bi, Yunlong Tang, Luchuan Song, Ali Vosoughi, Nguyen Nguyen, and Chenliang Xu}, journal={arXiv preprint arXiv:2409.17523}, year={2024}, doi={10.1145/3664647.3681618}, archivePrefix={arXiv}, eprint={2409.17523}, primaryClass={cs.CV cs.AI} }
bi2024eagle:
arxiv-662149
2409.17524
JoyType: A Robust Design for Multilingual Visual Text Creation
<|reference_start|>JoyType: A Robust Design for Multilingual Visual Text Creation: Generating images with accurately represented text, especially in non-Latin languages, poses a significant challenge for diffusion models. Existing approaches, such as the integration of hint condition diagrams via auxiliary networks (e.g., ControlNet), have made strides towards addressing this issue. However, diffusion models often fall short in tasks requiring controlled text generation, such as specifying particular fonts or producing text in small fonts. In this paper, we introduce a novel approach for multilingual visual text creation, named JoyType, designed to maintain the font style of text during the image generation process. Our methodology begins with assembling a training dataset, JoyType-1M, comprising 1 million pairs of data. Each pair includes an image, its description, and glyph instructions corresponding to the font style within the image. We then developed a text control network, Font ControlNet, tasked with extracting font style information to steer the image generation. To further enhance our model's ability to maintain font style, notably in generating small-font text, we incorporated a multi-layer OCR-aware loss into the diffusion process. This enhancement allows JoyType to direct text rendering using low-level descriptors. Our evaluations, based on both visual and accuracy metrics, demonstrate that JoyType significantly outperforms existing state-of-the-art methods. Additionally, JoyType can function as a plugin, facilitating the creation of varied image styles in conjunction with other stable diffusion models on HuggingFace and CivitAI. Our project is open-sourced on https://jdh-algo.github.io/JoyType/.<|reference_end|>
arxiv
@article{li2024joytype:, title={JoyType: A Robust Design for Multilingual Visual Text Creation}, author={Chao Li, Chen Jiang, Xiaolong Liu, Jun Zhao, Guoxin Wang}, journal={arXiv preprint arXiv:2409.17524}, year={2024}, archivePrefix={arXiv}, eprint={2409.17524}, primaryClass={cs.CV} }
li2024joytype:
arxiv-662150
2409.17525
When A Man Says He Is Pregnant: ERP Evidence for A Rational Account of Speaker-contextualized Language Comprehension
<|reference_start|>When A Man Says He Is Pregnant: ERP Evidence for A Rational Account of Speaker-contextualized Language Comprehension: Spoken language is often, if not always, understood in a context that includes the identities of speakers. For instance, we can easily make sense of an utterance such as "I'm going to have a manicure this weekend" or "The first time I got pregnant I had a hard time" when the utterance is spoken by a woman, but it would be harder to understand when it is spoken by a man. Previous event-related potential (ERP) studies have shown mixed results regarding the neurophysiological responses to such speaker-mismatched utterances, with some reporting an N400 effect and others a P600 effect. In an experiment involving 64 participants, we showed that these different ERP effects reflect distinct cognitive processes employed to resolve the speaker-message mismatch. When possible, the message is integrated with the speaker context to arrive at an interpretation, as in the case of violations of social stereotypes (e.g., men getting a manicure), resulting in an N400 effect. However, when such integration is impossible due to violations of biological knowledge (e.g., men getting pregnant), listeners engage in an error correction process to revise either the perceived utterance or the speaker context, resulting in a P600 effect. Additionally, we found that the social N400 effect decreased as a function of the listener's personality trait of openness, while the biological P600 effect remained robust. Our findings help to reconcile the empirical inconsistencies in the literature and provide a rational account of speaker-contextualized language comprehension.<|reference_end|>
arxiv
@article{wu2024when, title={When A Man Says He Is Pregnant: ERP Evidence for A Rational Account of Speaker-contextualized Language Comprehension}, author={Hanlin Wu and Zhenguang G. Cai}, journal={arXiv preprint arXiv:2409.17525}, year={2024}, archivePrefix={arXiv}, eprint={2409.17525}, primaryClass={q-bio.NC cs.CL} }
wu2024when
arxiv-662151
2409.17526
Drone Stereo Vision for Radiata Pine Branch Detection and Distance Measurement: Integrating SGBM and Segmentation Models
<|reference_start|>Drone Stereo Vision for Radiata Pine Branch Detection and Distance Measurement: Integrating SGBM and Segmentation Models: Manual pruning of radiata pine trees presents significant safety risks due to their substantial height and the challenging terrains in which they thrive. To address these risks, this research proposes the development of a drone-based pruning system equipped with specialized pruning tools and a stereo vision camera, enabling precise detection and trimming of branches. Deep learning algorithms, including YOLO and Mask R-CNN, are employed to ensure accurate branch detection, while the Semi-Global Matching algorithm is integrated to provide reliable distance estimation. The synergy between these techniques facilitates the precise identification of branch locations and enables efficient, targeted pruning. Experimental results demonstrate that the combined implementation of YOLO and SGBM enables the drone to accurately detect branches and measure their distances from the drone. This research not only improves the safety and efficiency of pruning operations but also makes a significant contribution to the advancement of drone technology in the automation of agricultural and forestry practices, laying a foundational framework for further innovations in environmental management.<|reference_end|>
arxiv
@article{lin2024drone, title={Drone Stereo Vision for Radiata Pine Branch Detection and Distance Measurement: Integrating SGBM and Segmentation Models}, author={Yida Lin, Bing Xue, Mengjie Zhang, Sam Schofield, Richard Green}, journal={arXiv preprint arXiv:2409.17526}, year={2024}, archivePrefix={arXiv}, eprint={2409.17526}, primaryClass={cs.CV cs.AI} }
lin2024drone
arxiv-662152
2409.17527
Data Proportion Detection for Optimized Data Management for Large Language Models
<|reference_start|>Data Proportion Detection for Optimized Data Management for Large Language Models: Large language models (LLMs) have demonstrated exceptional performance across a wide range of tasks and domains, with data preparation playing a critical role in achieving these results. Pre-training data typically combines information from multiple domains. To maximize performance when integrating data from various domains, determining the optimal data proportion is essential. However, state-of-the-art (SOTA) LLMs rarely disclose details about their pre-training data, making it difficult for researchers to identify ideal data proportions. In this paper, we introduce a new topic, data proportion detection, which enables the automatic estimation of pre-training data proportions by analyzing the generated outputs of LLMs. We provide rigorous theoretical proofs, practical algorithms, and preliminary experimental results for data proportion detection. Based on these findings, we offer valuable insights into the challenges and future directions for effective data proportion detection and data management.<|reference_end|>
arxiv
@article{liang2024data, title={Data Proportion Detection for Optimized Data Management for Large Language Models}, author={Hao Liang, Keshi Zhao, Yajie Yang, Bin Cui, Guosheng Dong, Zenan Zhou, Wentao Zhang}, journal={arXiv preprint arXiv:2409.17527}, year={2024}, archivePrefix={arXiv}, eprint={2409.17527}, primaryClass={cs.CL} }
liang2024data
arxiv-662153
2409.17531
SimVG: A Simple Framework for Visual Grounding with Decoupled Multi-modal Fusion
<|reference_start|>SimVG: A Simple Framework for Visual Grounding with Decoupled Multi-modal Fusion: Visual grounding is a common vision task that involves grounding descriptive sentences to the corresponding regions of an image. Most existing methods use independent image-text encoding and apply complex hand-crafted modules or encoder-decoder architectures for modal interaction and query reasoning. However, their performance significantly drops when dealing with complex textual expressions. This is because the former paradigm only utilizes limited downstream data to fit the multi-modal feature fusion. Therefore, it is only effective when the textual expressions are relatively simple. In contrast, given the wide diversity of textual expressions and the uniqueness of downstream training data, the existing fusion module, which extracts multimodal content from a visual-linguistic context, has not been fully investigated. In this paper, we present a simple yet robust transformer-based framework, SimVG, for visual grounding. Specifically, we decouple visual-linguistic feature fusion from downstream tasks by leveraging existing multimodal pre-trained models and incorporating additional object tokens to facilitate deep integration of downstream and pre-training tasks. Furthermore, we design a dynamic weight-balance distillation method in the multi-branch synchronous learning process to enhance the representation capability of the simpler branch. This branch only consists of a lightweight MLP, which simplifies the structure and improves reasoning speed. Experiments on six widely used VG datasets, i.e., RefCOCO/+/g, ReferIt, Flickr30K, and GRefCOCO, demonstrate the superiority of SimVG. Finally, the proposed method not only achieves improvements in efficiency and convergence speed but also attains new state-of-the-art performance on these benchmarks. Codes and models will be available at https://github.com/Dmmm1997/SimVG.<|reference_end|>
arxiv
@article{dai2024simvg:, title={SimVG: A Simple Framework for Visual Grounding with Decoupled Multi-modal Fusion}, author={Ming Dai, Lingfeng Yang, Yihao Xu, Zhenhua Feng, Wankou Yang}, journal={arXiv preprint arXiv:2409.17531}, year={2024}, archivePrefix={arXiv}, eprint={2409.17531}, primaryClass={cs.CV cs.AI} }
dai2024simvg:
arxiv-662154
2409.17533
CAMOT: Camera Angle-aware Multi-Object Tracking
<|reference_start|>CAMOT: Camera Angle-aware Multi-Object Tracking: This paper proposes CAMOT, a simple camera angle estimator for multi-object tracking to tackle two problems: 1) occlusion and 2) inaccurate distance estimation in the depth direction. Under the assumption that multiple objects are located on a flat plane in each video frame, CAMOT estimates the camera angle using object detection. In addition, it gives the depth of each object, enabling pseudo-3D MOT. We evaluated its performance by adding it to various 2D MOT methods on the MOT17 and MOT20 datasets and confirmed its effectiveness. Applying CAMOT to ByteTrack, we obtained 63.8% HOTA, 80.6% MOTA, and 78.5% IDF1 in MOT17, which are state-of-the-art results. Its computational cost is significantly lower than the existing deep-learning-based depth estimators for tracking.<|reference_end|>
arxiv
@article{limanta2024camot:, title={CAMOT: Camera Angle-aware Multi-Object Tracking}, author={Felix Limanta, Kuniaki Uto, Koichi Shinoda}, journal={WACV 2024}, year={2024}, doi={10.1109/WACV57701.2024.00635}, archivePrefix={arXiv}, eprint={2409.17533}, primaryClass={cs.CV} }
limanta2024camot:
arxiv-662155
2409.17534
Just Say What You Want: Only-prompting Self-rewarding Online Preference Optimization
<|reference_start|>Just say what you want: only-prompting self-rewarding online preference optimization: We address the challenge of online Reinforcement Learning from Human Feedback (RLHF) with a focus on self-rewarding alignment methods. In online RLHF, obtaining feedback requires interaction with the environment, which can be costly when using additional reward models or the GPT-4 API. Current self-rewarding approaches rely heavily on the discriminator's judgment capabilities, which are effective for large-scale models but challenging to transfer to smaller ones. To address these limitations, we propose a novel, only-prompting self-rewarding online algorithm that generates preference datasets without relying on judgment capabilities. Additionally, we employ fine-grained arithmetic control over the optimality gap between positive and negative examples, generating more hard negatives in the later stages of training to help the model better capture subtle human preferences. Finally, we conduct extensive experiments on two base models, Mistral-7B and Mistral-Instruct-7B, which significantly bootstrap the performance of the reference model, achieving 34.5% in the Length-controlled Win Rates of AlpacaEval 2.0.<|reference_end|>
arxiv
@article{xu2024just, title={Just Say What You Want: Only-prompting Self-rewarding Online Preference Optimization}, author={Ruijie Xu, Zhihan Liu, Yongfei Liu, Shipeng Yan, Zhaoran Wang, Zhi Zhang and Xuming He}, journal={arXiv preprint arXiv:2409.17534}, year={2024}, archivePrefix={arXiv}, eprint={2409.17534}, primaryClass={cs.AI} }
xu2024just
arxiv-662156
2409.17535
Privacy-Preserving Redaction of Diagnosis Data through Source Code Analysis
<|reference_start|>Privacy-Preserving Redaction of Diagnosis Data through Source Code Analysis: Protecting sensitive information in diagnostic data such as logs, is a critical concern in the industrial software diagnosis and debugging process. While there are many tools developed to automatically redact the logs for identifying and removing sensitive information, they have severe limitations which can cause either over redaction and loss of critical diagnostic information (false positives), or disclosure of sensitive information (false negatives), or both. To address the problem, in this paper, we argue for a source code analysis approach for log redaction. To identify a log message containing sensitive information, our method locates the corresponding log statement in the source code with logger code augmentation, and checks if the log statement outputs data from sensitive sources by using the data flow graph built from the source code. Appropriate redaction rules are further applied depending on the sensitiveness of the data sources to preserve the privacy information in the logs. We conducted experimental evaluation and comparison with other popular baselines. The results demonstrate that our approach can significantly improve the detection precision of the sensitive information and reduce both false positives and negatives.<|reference_end|>
arxiv
@article{zhou2024privacy-preserving, title={Privacy-Preserving Redaction of Diagnosis Data through Source Code Analysis}, author={Lixi Zhou, Lei Yu, Jia Zou, Hong Min}, journal={Proceedings of the 35th International Conference on Scientific and Statistical Database Management (SSDBM 2023)}, year={2024}, doi={10.1145/3603719.3603734}, archivePrefix={arXiv}, eprint={2409.17535}, primaryClass={cs.CR} }
zhou2024privacy-preserving
arxiv-662157
2409.17536
MUSE: Integrating Multi-Knowledge for Knowledge Graph Completion
<|reference_start|>MUSE: Integrating Multi-Knowledge for Knowledge Graph Completion: Knowledge Graph Completion (KGC) aims to predict the missing [relation] part of (head entity)--[relation]->(tail entity) triplet. Most existing KGC methods focus on single features (e.g., relation types) or sub-graph aggregation. However, they do not fully explore the Knowledge Graph (KG) features and neglect the guidance of external semantic knowledge. To address these shortcomings, we propose a knowledge-aware reasoning model (MUSE), which designs a novel multi-knowledge representation learning mechanism for missing relation prediction. Our model develops a tailored embedding space through three parallel components: 1) Prior Knowledge Learning for enhancing the triplets' semantic representation by fine-tuning BERT; 2) Context Message Passing for enhancing the context messages of KG; 3) Relational Path Aggregation for enhancing the path representation from the head entity to the tail entity. The experimental results show that MUSE significantly outperforms other baselines on four public datasets, achieving over 5.50% H@1 improvement and 4.20% MRR improvement on the NELL995 dataset. The code and datasets will be released via https://github.com/SUSTech-TP/ADMA2024-MUSE.git.<|reference_end|>
arxiv
@article{liu2024muse:, title={MUSE: Integrating Multi-Knowledge for Knowledge Graph Completion}, author={Pengjie Liu}, journal={arXiv preprint arXiv:2409.17536}, year={2024}, archivePrefix={arXiv}, eprint={2409.17536}, primaryClass={cs.CL} }
liu2024muse:
arxiv-662158
2409.17538
On the Implicit Relation Between Low-Rank Adaptation and Differential Privacy
<|reference_start|>On the Implicit Relation Between Low-Rank Adaptation and Differential Privacy: A significant approach in natural language processing involves large-scale pre-training on general domain data followed by adaptation to specific tasks or domains. As models grow in size, full fine-tuning of all parameters becomes increasingly impractical. To address this, some methods for low-rank task adaptation of language models have been proposed, e.g., LoRA and FLoRA. These methods keep the pre-trained model weights fixed and incorporate trainable low-rank decomposition matrices into some layers of the transformer architecture, called adapters. This approach significantly reduces the number of trainable parameters required for downstream tasks compared to full fine-tuning of all parameters. In this work, we look at low-rank adaptation through the lens of data privacy. We show theoretically that the low-rank adaptation used in LoRA and FLoRA is equivalent to injecting some random noise into the batch gradients w.r.t the adapter parameters coming from their full fine-tuning, and we quantify the variance of the injected noise. By establishing a Berry-Esseen type bound on the total variation distance between the noise distribution and a Gaussian distribution with the same variance, we show that the dynamics of LoRA and FLoRA are very close to differentially private full fine-tuning of the adapters, which suggests that low-rank adaptation implicitly provides privacy w.r.t the fine-tuning data. Finally, using the Johnson-Lindenstrauss lemma, we show that when augmented with gradient clipping, low-rank adaptation is almost equivalent to differentially private full fine-tuning of adapters with a fixed noise scale.<|reference_end|>
arxiv
@article{malekmohammadi2024on, title={On the Implicit Relation Between Low-Rank Adaptation and Differential Privacy}, author={Saber Malekmohammadi, Golnoosh Farnadi}, journal={arXiv preprint arXiv:2409.17538}, year={2024}, archivePrefix={arXiv}, eprint={2409.17538}, primaryClass={cs.LG cs.AI cs.CL} }
malekmohammadi2024on
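For readers unfamiliar with the adapter setup analyzed in the entry above, the following is a minimal PyTorch sketch of a standard LoRA-style linear layer: the pre-trained weight is frozen and a trainable low-rank update B A is added. The class name `LoRALinear` and the hyperparameter choices are illustrative assumptions; the paper's privacy analysis (the induced gradient noise, the Berry-Esseen bound, and the clipping argument) is not reproduced here.

```python
import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    """Frozen pre-trained linear layer plus a trainable low-rank update: W x + (alpha/r) * B A x."""

    def __init__(self, base: nn.Linear, r: int = 8, alpha: float = 16.0):
        super().__init__()
        self.base = base
        for p in self.base.parameters():  # pre-trained weights stay fixed
            p.requires_grad = False
        # Low-rank factors: A maps inputs to rank r, B maps rank r to outputs.
        self.A = nn.Parameter(torch.randn(r, base.in_features) * 0.01)
        self.B = nn.Parameter(torch.zeros(base.out_features, r))  # zero init: no change at start
        self.scale = alpha / r

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x @ A^T @ B^T is the low-rank correction (B @ A) applied to x.
        return self.base(x) + self.scale * (x @ self.A.T @ self.B.T)

# Usage: only A and B receive gradients during fine-tuning.
layer = LoRALinear(nn.Linear(768, 768))
out = layer(torch.randn(4, 768))
```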
arxiv-662159
2409.17539
Logic-of-Thought: Injecting Logic into Contexts for Full Reasoning in Large Language Models
<|reference_start|>Logic-of-Thought: Injecting Logic into Contexts for Full Reasoning in Large Language Models: Large Language Models (LLMs) have demonstrated remarkable capabilities across various tasks but their performance in complex logical reasoning tasks remains unsatisfactory. Although some prompting methods, such as Chain-of-Thought, can improve the reasoning ability of LLMs to some extent, they suffer from an unfaithful issue where derived conclusions may not align with the generated reasoning chain. To address this issue, some studies employ the approach of propositional logic to further enhance logical reasoning abilities of LLMs. However, the potential omissions in the extraction of logical expressions in these methods can cause information loss in the logical reasoning process, thereby generating incorrect results. To this end, we propose Logic-of-Thought (LoT) prompting which employs propositional logic to generate expanded logical information from input context, and utilizes the generated logical information as an additional augmentation to the input prompts, thereby enhancing the capability of logical reasoning. The LoT is orthogonal to existing prompting methods and can be seamlessly integrated with them. Extensive experiments demonstrate that LoT boosts the performance of various prompting methods with a striking margin across five logical reasoning tasks. In particular, the LoT enhances Chain-of-Thought's performance on the ReClor dataset by +4.35%; moreover, it improves Chain-of-Thought with Self-Consistency's performance on LogiQA by +5%; additionally, it boosts performance of Tree-of-Thoughts on ProofWriter dataset by +8%.<|reference_end|>
arxiv
@article{liu2024logic-of-thought:, title={Logic-of-Thought: Injecting Logic into Contexts for Full Reasoning in Large Language Models}, author={Tongxuan Liu, Wenjiang Xu, Weizhe Huang, Xingyu Wang, Jiaxing Wang, Hailong Yang, Jing Li}, journal={arXiv preprint arXiv:2409.17539}, year={2024}, archivePrefix={arXiv}, eprint={2409.17539}, primaryClass={cs.CL} }
liu2024logic-of-thought:
arxiv-662160
2409.17541
Swapping-Centric Neural Recording Systems
<|reference_start|>Swapping-Centric Neural Recording Systems: Neural interfaces read the activity of biological neurons to help advance the neurosciences and offer treatment options for severe neurological diseases. The total number of neurons that are now being recorded using multi-electrode interfaces is doubling roughly every 4-6 years [Stevenson2011]. However, processing this exponentially-growing data in real-time under strict power-constraints puts an exorbitant amount of pressure on both compute and storage within traditional neural recording systems. Existing systems deploy various accelerators for better performance-per-watt while also integrating NVMs for data querying and better treatment decisions. These accelerators have direct access to a limited amount of fast SRAM-based memory that is unable to manage the growing data rates. Swapping to the NVM becomes inevitable; however, naive approaches are unable to complete during the refractory period of a neuron -- i.e., a few milliseconds -- which disrupts timely disease treatment. We propose co-designing accelerators and storage, with swapping as a primary design goal, using theoretical and practical models of compute and storage respectively to overcome these limitations.<|reference_end|>
arxiv
@article{ugur2024swapping-centric, title={Swapping-Centric Neural Recording Systems}, author={Muhammed Ugur, Raghavendra Pradyumna Pothukuchi, Abhishek Bhattacharjee}, journal={The 15th Annual Non-Volatile Memories Workshop (NVMW), March, 2024}, year={2024}, archivePrefix={arXiv}, eprint={2409.17541}, primaryClass={cs.AR} }
ugur2024swapping-centric
arxiv-662161
2409.17544
Optimizing the Induced Correlation in Omnibus Joint Graph Embeddings
<|reference_start|>Optimizing the Induced Correlation in Omnibus Joint Graph Embeddings: Theoretical and empirical evidence suggests that joint graph embedding algorithms induce correlation across the networks in the embedding space. In the Omnibus joint graph embedding framework, previous results explicitly delineated the dual effects of the algorithm-induced and model-inherent correlations on the correlation across the embedded networks. Accounting for and mitigating the algorithm-induced correlation is key to subsequent inference, as sub-optimal Omnibus matrix constructions have been demonstrated to lead to loss in inference fidelity. This work presents the first efforts to automate the Omnibus construction in order to address two key questions in this joint embedding framework: the correlation-to-OMNI problem and the flat correlation problem. In the flat correlation problem, we seek to understand the minimum algorithm-induced flat correlation (i.e., the same across all graph pairs) produced by a generalized Omnibus embedding. Working in a subspace of the fully general Omnibus matrices, we prove both a lower bound for this flat correlation and that the classical Omnibus construction induces the maximal flat correlation. In the correlation-to-OMNI problem, we present an algorithm -- named corr2Omni -- that, from a given matrix of estimated pairwise graph correlations, estimates the matrix of generalized Omnibus weights that induces optimal correlation in the embedding space. Moreover, in both simulated and real data settings, we demonstrate the increased effectiveness of our corr2Omni algorithm versus the classical Omnibus construction.<|reference_end|>
arxiv
@article{pantazis2024optimizing, title={Optimizing the Induced Correlation in Omnibus Joint Graph Embeddings}, author={Konstantinos Pantazis, Michael Trosset, William N. Frost, Carey E. Priebe, Vince Lyzinski}, journal={arXiv preprint arXiv:2409.17544}, year={2024}, archivePrefix={arXiv}, eprint={2409.17544}, primaryClass={stat.ML cs.LG math.ST stat.ME stat.TH} }
pantazis2024optimizing
arxiv-662162
2409.17545
Modulated Intervention Preference Optimization (MIPO): Keep the Easy, Refine the Difficult
<|reference_start|>Modulated Intervention Preference Optimization (MIPO): Keep the Easy, Refine the Difficult: Preference optimization methods typically begin training with a well-trained SFT model as a reference model. In RLHF and DPO, a regularization term is used during the preference optimization process to prevent the policy model from deviating too far from the reference model's distribution, thereby avoiding the generation of anomalous responses. When the reference model is already well-aligned with the given data or only requires slight adjustments, this approach can produce a well-aligned model. However, if the reference model is not aligned with the given data and requires significant deviation from its current state, a regularization term may actually hinder model alignment. In this study, we propose \textbf{Modulated Intervention Preference Optimization (MIPO)} to address this issue. MIPO modulates the degree of intervention from the reference model based on how well the given data is aligned with it. If the data is well-aligned, the intervention is increased to prevent the policy model from diverging significantly from the reference model. Conversely, if the alignment is poor, the intervention is reduced to facilitate more extensive training. We compare the performance of MIPO and DPO using Mistral-7B and Llama3-8B on AlpacaEval 2.0 and MT-Bench. The experimental results demonstrate that MIPO consistently outperforms DPO across various evaluation scenarios.<|reference_end|>
arxiv
@article{jang2024modulated, title={Modulated Intervention Preference Optimization (MIPO): Keep the Easy, Refine the Difficult}, author={Cheolhun Jang}, journal={arXiv preprint arXiv:2409.17545}, year={2024}, archivePrefix={arXiv}, eprint={2409.17545}, primaryClass={cs.CL cs.AI cs.LG} }
jang2024modulated
arxiv-662163
2409.17546
MASSFormer: Mobility-Aware Spectrum Sensing using Transformer-Driven Tiered Structure
<|reference_start|>MASSFormer: Mobility-Aware Spectrum Sensing using Transformer-Driven Tiered Structure: In this paper, we develop a novel mobility-aware transformer-driven tiered structure (MASSFormer) based cooperative spectrum sensing method that effectively models the spatio-temporal dynamics of user movements. Unlike existing methods, our method considers a dynamic scenario involving mobile primary users (PUs) and secondary users (SUs) and addresses the complexities introduced by user mobility. The transformer architecture utilizes an attention mechanism, enabling the proposed method to adeptly model the temporal dynamics of user mobility by effectively capturing long-range dependencies within the input data. The proposed method first computes tokens from the sequence of covariance matrices (CMs) for each SU and processes them in parallel using the SU transformer network to learn the spatio-temporal features at the SU level. Subsequently, the collaborative transformer network learns the group-level PU state from all SU-level feature representations. The attention-based sequence pooling method followed by the transformer encoder adjusts the contributions of all tokens. The main goal of predicting the PU states at both the SU level and the group level is to further improve detection performance. We conducted extensive simulations and compared the detection performance of different SS methods. The proposed method is tested under imperfect reporting channel scenarios to show robustness. The simulation results validate the efficacy of our method, demonstrating higher performance compared with existing methods in terms of detection probability, sensing error, and classification accuracy.<|reference_end|>
arxiv
@article{janu2024massformer:, title={MASSFormer: Mobility-Aware Spectrum Sensing using Transformer-Driven Tiered Structure}, author={Dimpal Janu, Sandeep Mandia, Kuldeep Singh and Sandeep Kumar}, journal={arXiv preprint arXiv:2409.17546}, year={2024}, archivePrefix={arXiv}, eprint={2409.17546}, primaryClass={cs.IT cs.LG math.IT} }
janu2024massformer:
arxiv-662164
2409.17547
Triple Point Masking
<|reference_start|>Triple Point Masking: Existing 3D mask learning methods encounter performance bottlenecks under limited data, and our objective is to overcome this limitation. In this paper, we introduce a triple point masking scheme, named TPM, which serves as a scalable framework for pre-training of masked autoencoders to achieve multi-mask learning for 3D point clouds. Specifically, we augment the baselines with two additional mask choices (i.e., medium mask and low mask) as our core insight is that the recovery process of an object can manifest in diverse ways. Previous high-masking schemes focus on capturing the global representation but lack the fine-grained recovery capability, so that the generated pre-trained weights tend to play a limited role in the fine-tuning process. With the support of the proposed TPM, available methods can exhibit more flexible and accurate completion capabilities, enabling the potential autoencoder in the pre-training stage to consider multiple representations of a single 3D object. In addition, an SVM-guided weight selection module is proposed to fill the encoder parameters for downstream networks with the optimal weight during the fine-tuning stage, maximizing linear accuracy and facilitating the acquisition of intricate representations for new objects. Extensive experiments show that the four baselines equipped with the proposed TPM achieve comprehensive performance improvements on various downstream tasks.<|reference_end|>
arxiv
@article{liu2024triple, title={Triple Point Masking}, author={Jiaming Liu, Linghe Kong, Yue Wu, Maoguo Gong, Hao Li, Qiguang Miao, Wenping Ma, Can Qin}, journal={arXiv preprint arXiv:2409.17547}, year={2024}, archivePrefix={arXiv}, eprint={2409.17547}, primaryClass={cs.CV cs.AI} }
liu2024triple
arxiv-662165
2409.17549
Canonical Representation and Force-Based Pretraining of 3D Tactile for Dexterous Visuo-Tactile Policy Learning
<|reference_start|>Canonical Representation and Force-Based Pretraining of 3D Tactile for Dexterous Visuo-Tactile Policy Learning: Tactile sensing plays a vital role in enabling robots to perform fine-grained, contact-rich tasks. However, the high dimensionality of tactile data, due to the large coverage on dexterous hands, poses significant challenges for effective tactile feature learning, especially for 3D tactile data, as there are no large standardized datasets and no strong pretrained backbones. To address these challenges, we propose a novel canonical representation that reduces the difficulty of 3D tactile feature learning and further introduces a force-based self-supervised pretraining task to capture both local and net force features, which are crucial for dexterous manipulation. Our method achieves an average success rate of 78% across four fine-grained, contact-rich dexterous manipulation tasks in real-world experiments, demonstrating effectiveness and robustness compared to other methods. Further analysis shows that our method fully utilizes both spatial and force information from 3D tactile data to accomplish the tasks. The videos can be viewed at https://3dtacdex.github.io.<|reference_end|>
arxiv
@article{wu2024canonical, title={Canonical Representation and Force-Based Pretraining of 3D Tactile for Dexterous Visuo-Tactile Policy Learning}, author={Tianhao Wu, Jinzhou Li, Jiyao Zhang, Mingdong Wu, Hao Dong}, journal={arXiv preprint arXiv:2409.17549}, year={2024}, archivePrefix={arXiv}, eprint={2409.17549}, primaryClass={cs.RO} }
wu2024canonical
arxiv-662166
2409.17550
A Simple but Strong Baseline for Sounding Video Generation: Effective Adaptation of Audio and Video Diffusion Models for Joint Generation
<|reference_start|>A Simple but Strong Baseline for Sounding Video Generation: Effective Adaptation of Audio and Video Diffusion Models for Joint Generation: In this work, we build a simple but strong baseline for sounding video generation. Given base diffusion models for audio and video, we integrate them with additional modules into a single model and train it to make the model jointly generate audio and video. To enhance alignment between audio-video pairs, we introduce two novel mechanisms in our model. The first one is timestep adjustment, which provides different timestep information to each base model. It is designed to align how samples are generated along with timesteps across modalities. The second one is a new design of the additional modules, termed Cross-Modal Conditioning as Positional Encoding (CMC-PE). In CMC-PE, cross-modal information is embedded as if it represents temporal position information, and the embeddings are fed into the model like positional encoding. Compared with the popular cross-attention mechanism, CMC-PE provides a better inductive bias for temporal alignment in the generated data. Experimental results validate the effectiveness of the two newly introduced mechanisms and also demonstrate that our method outperforms existing methods.<|reference_end|>
arxiv
@article{ishii2024a, title={A Simple but Strong Baseline for Sounding Video Generation: Effective Adaptation of Audio and Video Diffusion Models for Joint Generation}, author={Masato Ishii and Akio Hayakawa and Takashi Shibuya and Yuki Mitsufuji}, journal={arXiv preprint arXiv:2409.17550}, year={2024}, archivePrefix={arXiv}, eprint={2409.17550}, primaryClass={cs.LG cs.MM cs.SD eess.AS} }
ishii2024a
arxiv-662167
2409.17552
Expression Rates of Neural Operators for Linear Elliptic PDEs in Polytopes
<|reference_start|>Expression Rates of Neural Operators for Linear Elliptic PDEs in Polytopes: We study the approximation rates of a class of deep neural network approximations of operators, which arise as data-to-solution maps $\mathcal{G}^\dagger$ of linear elliptic partial differential equations (PDEs), and act between pairs $X,Y$ of suitable infinite-dimensional spaces. We prove expression rate bounds for approximate neural operators $\mathcal{G}$ with the structure $\mathcal{G} = \mathcal{R} \circ \mathcal{A} \circ \mathcal{E}$, with linear encoders $\mathcal{E}$ and decoders $\mathcal{R}$. The constructive proofs are via a recurrent NN structure obtained by unrolling exponentially convergent, self-consistent (``Richardson'') iterations. We bound the operator approximation error with respect to the linear Kolmogorov $N$-widths of the data and solution sets and in terms of the size of the approximation network. We prove expression rate bounds for approximate, neural solution operators emulating the coefficient-to-solution maps for elliptic PDEs set in $d$-dimensional polytopes, with $d\in\{2,3\}$, and subject to Dirichlet-, Neumann- or mixed boundary conditions. Exploiting weighted norm characterizations of the solution sets of elliptic PDEs in polytopes, we show algebraic rates of expression for problems with data with finite regularity, and exponential operator expression rates for analytic data.<|reference_end|>
arxiv
@article{marcati2024expression, title={Expression Rates of Neural Operators for Linear Elliptic PDEs in Polytopes}, author={Carlo Marcati and Christoph Schwab}, journal={arXiv preprint arXiv:2409.17552}, year={2024}, archivePrefix={arXiv}, eprint={2409.17552}, primaryClass={math.NA cs.NA} }
marcati2024expression
arxiv-662168
2409.17553
What Roles can Spatial Modulation and Space Shift Keying Play in LEO Satellite-Assisted Communication?
<|reference_start|>What Roles can Spatial Modulation and Space Shift Keying Play in LEO Satellite-Assisted Communication?: In recent years, the rapid evolution of satellite communications has played a pivotal role in addressing the ever-increasing demand for global connectivity, among which Low Earth Orbit (LEO) satellites attract considerable attention due to their low latency and high data throughput capabilities. Based on this, we explore spatial modulation (SM) and space shift keying (SSK) designs as pivotal techniques to enhance spectral efficiency (SE) and bit-error rate (BER) performance in LEO satellite-assisted multiple-input multiple-output (MIMO) systems. Various performance analyses of these designs are presented in this paper, revealing insightful findings and conclusions through analytical methods and Monte Carlo simulations with perfect and imperfect channel state information (CSI) estimation. The results provide a comprehensive analysis of the merits and trade-offs associated with the investigated schemes, particularly in terms of BER, computational complexity, and SE. This analysis underscores the potential of both schemes as viable candidates for future 6G LEO satellite-assisted wireless communication systems.<|reference_end|>
arxiv
@article{zhang2024what, title={What Roles can Spatial Modulation and Space Shift Keying Play in LEO Satellite-Assisted Communication?}, author={Chaorong Zhang, Qingying Wu, Yuyan Liu, Benjamin K. Ng, and Chan-Tong Lam}, journal={arXiv preprint arXiv:2409.17553}, year={2024}, archivePrefix={arXiv}, eprint={2409.17553}, primaryClass={cs.IT eess.SP math.IT} }
zhang2024what
arxiv-662169
2409.17555
Advancing Open-Set Domain Generalization Using Evidential Bi-Level Hardest Domain Scheduler
<|reference_start|>Advancing Open-Set Domain Generalization Using Evidential Bi-Level Hardest Domain Scheduler: In Open-Set Domain Generalization (OSDG), the model is exposed to both new variations of data appearance (domains) and open-set conditions, where both known and novel categories are present at test time. The challenges of this task arise from the dual need to generalize across diverse domains and accurately quantify category novelty, which is critical for applications in dynamic environments. Recently, meta-learning techniques have demonstrated superior results in OSDG, effectively orchestrating the meta-train and -test tasks by employing varied random categories and predefined domain partition strategies. These approaches prioritize a well-designed training schedule over traditional methods that focus primarily on data augmentation and the enhancement of discriminative feature learning. The prevailing meta-learning models in OSDG typically utilize a predefined sequential domain scheduler to structure data partitions. However, a crucial aspect that remains inadequately explored is the influence brought by strategies of domain schedulers during training. In this paper, we observe that an adaptive domain scheduler benefits more in OSDG compared with prefixed sequential and random domain schedulers. We propose the Evidential Bi-Level Hardest Domain Scheduler (EBiL-HaDS) to achieve an adaptive domain scheduler. This method strategically sequences domains by assessing their reliabilities in utilizing a follower network, trained with confidence scores learned in an evidential manner, regularized by max rebiasing discrepancy, and optimized in a bi-level manner. The results show that our method substantially improves OSDG performance and achieves more discriminative embeddings for both the seen and unseen categories. The source code will be available at https://github.com/KPeng9510/EBiL-HaDS.<|reference_end|>
arxiv
@article{peng2024advancing, title={Advancing Open-Set Domain Generalization Using Evidential Bi-Level Hardest Domain Scheduler}, author={Kunyu Peng, Di Wen, Kailun Yang, Ao Luo, Yufan Chen, Jia Fu, M. Saquib Sarfraz, Alina Roitberg, Rainer Stiefelhagen}, journal={arXiv preprint arXiv:2409.17555}, year={2024}, archivePrefix={arXiv}, eprint={2409.17555}, primaryClass={cs.LG cs.CV} }
peng2024advancing
arxiv-662170
2409.17557
Joint Source-Channel Coding: Fundamentals and Recent Progress in Practical Designs
<|reference_start|>Joint Source-Channel Coding: Fundamentals and Recent Progress in Practical Designs: Semantic- and task-oriented communication has emerged as a promising approach to reducing the latency and bandwidth requirements of next-generation mobile networks by transmitting only the most relevant information needed to complete a specific task at the receiver. This is particularly advantageous for machine-oriented communication of high data rate content, such as images and videos, where the goal is rapid and accurate inference, rather than perfect signal reconstruction. While semantic- and task-oriented compression can be implemented in conventional communication systems, joint source-channel coding (JSCC) offers an alternative end-to-end approach by optimizing compression and channel coding together, or even directly mapping the source signal to the modulated waveform. Although all digital communication systems today rely on separation, thanks to its modularity, JSCC is known to achieve higher performance in finite blocklength scenarios, and to avoid the cliff and levelling-off effects in time-varying channel scenarios. This article provides an overview of the information-theoretic foundations of JSCC, surveys practical JSCC designs over the decades, and discusses the reasons for their limited adoption in practical systems. We then examine the recent resurgence of JSCC, driven by the integration of deep learning techniques, particularly through DeepJSCC, highlighting its many surprising advantages in various scenarios. Finally, we discuss why it may be time to reconsider today's strictly separate architectures, and reintroduce JSCC to enable high-fidelity, low-latency communications in critical applications such as autonomous driving, drone surveillance, or wearable systems.<|reference_end|>
arxiv
@article{gündüz2024joint, title={Joint Source-Channel Coding: Fundamentals and Recent Progress in Practical Designs}, author={Deniz Gündüz, Michèle A. Wigger, Tze-Yang Tung, Ping Zhang, Yong Xiao}, journal={arXiv preprint arXiv:2409.17557}, year={2024}, archivePrefix={arXiv}, eprint={2409.17557}, primaryClass={cs.IT cs.LG math.IT} }
gündüz2024joint
arxiv-662171
2409.17560
Dynamic Subframe Splitting and Spatio-Temporal Motion Entangled Sparse Attention for RGB-E Tracking
<|reference_start|>Dynamic Subframe Splitting and Spatio-Temporal Motion Entangled Sparse Attention for RGB-E Tracking: Event-based bionic cameras asynchronously capture dynamic scenes with high temporal resolution and high dynamic range, offering potential for the integration of events and RGB under conditions of illumination degradation and fast motion. Existing RGB-E tracking methods model event characteristics utilising the attention mechanism of Transformers before integrating both modalities. Nevertheless, these methods involve aggregating the event stream into a single event frame, lacking the utilisation of the temporal information inherent in the event stream. Moreover, the traditional attention mechanism is well-suited for dense semantic features, while the attention mechanism for sparse event features requires rethinking. In this paper, we propose a dynamic event subframe splitting strategy to split the event stream into more fine-grained event clusters, aiming to capture spatio-temporal features that contain motion cues. Based on this, we design an event-based sparse attention mechanism to enhance the interaction of event features in temporal and spatial dimensions. The experimental results indicate that our method outperforms existing state-of-the-art methods on the FE240 and COESOT datasets, providing an effective way of processing event data.<|reference_end|>
arxiv
@article{shao2024dynamic, title={Dynamic Subframe Splitting and Spatio-Temporal Motion Entangled Sparse Attention for RGB-E Tracking}, author={Pengcheng Shao, Tianyang Xu, Xuefeng Zhu, Xiaojun Wu, Josef Kittler}, journal={arXiv preprint arXiv:2409.17560}, year={2024}, archivePrefix={arXiv}, eprint={2409.17560}, primaryClass={cs.CV} }
shao2024dynamic
arxiv-662172
2409.17561
TestBench: Evaluating Class-Level Test Case Generation Capability of Large Language Models
<|reference_start|>TestBench: Evaluating Class-Level Test Case Generation Capability of Large Language Models: Software testing is a crucial phase in the software life cycle, helping identify potential risks and reduce maintenance costs. With the advancement of Large Language Models (LLMs), researchers have proposed an increasing number of LLM-based software testing techniques, particularly in the area of test case generation. Despite the growing interest, limited efforts have been made to thoroughly evaluate the actual capabilities of LLMs in this task. In this paper, we introduce TestBench, a benchmark for class-level LLM-based test case generation. We construct a dataset of 108 Java programs from 9 real-world, large-scale projects on GitHub, each representing a different thematic domain. We then design three distinct types of prompts based on context descriptions, including self-contained context, full context, and simple context. Besides, we propose a fine-grained evaluation framework that considers five aspects of test cases: syntactic correctness, compilation correctness, test correctness, code coverage rate, and defect detection rate. Furthermore, we propose a heuristic algorithm to repair erroneous test cases generated by LLMs. We evaluate CodeLlama-13b, GPT-3.5, and GPT-4 on the TestBench, and our experimental results indicate that larger models demonstrate a greater ability to effectively utilize contextual information, thus generating higher-quality test cases. Smaller models may struggle with the noise introduced by the extensive information contained within the full context. However, when using the simplified version, namely the simple context, which is derived from the full context via abstract syntax tree analysis, the performance of these models improves significantly. Our analysis highlights the current progress and pinpoints future directions to further enhance the effectiveness of models by handling contextual information for test case generation.<|reference_end|>
arxiv
@article{zhang2024testbench:, title={TestBench: Evaluating Class-Level Test Case Generation Capability of Large Language Models}, author={Quanjun Zhang, Ye Shang, Chunrong Fang, Siqi Gu, Jianyi Zhou and Zhenyu Chen}, journal={arXiv preprint arXiv:2409.17561}, year={2024}, archivePrefix={arXiv}, eprint={2409.17561}, primaryClass={cs.SE} }
zhang2024testbench:
arxiv-662173
2409.17562
Software for the SpaceDREAM Robotic Arm
<|reference_start|>Software for the SpaceDREAM Robotic Arm: Impedance-controlled robots are widely used on Earth to perform interaction-rich tasks and will be a key enabler for In-Space Servicing, Assembly and Manufacturing (ISAM) activities. This paper introduces the software architecture used on the On-Board Computer (OBC) for the planned SpaceDREAM mission, which aims to validate such a robotic arm in Low Earth Orbit (LEO) and is conducted by the German Aerospace Center (DLR) in cooperation with KINETIK Space GmbH and the Technical University of Munich (TUM). During the mission, several free-motion as well as contact tasks are to be performed in order to verify proper functionality of the robot in position and impedance control at the joint level as well as in Cartesian control. The tasks are selected to be representative of subsequent servicing missions, e.g. requiring interface docking or precise manipulation. The software on the OBC commands the robot's joints via SpaceWire to perform those mission tasks, reads camera images and data from additional sensors and sends telemetry data through an Ethernet link via the spacecraft down to Earth. It is set up to execute a predefined mission after receiving a start signal from the spacecraft while it should be extendable to receive commands from Earth for later missions. The core design principle was to reuse as much existing software as possible and to stay close to existing robot software stacks at DLR. This allowed for a quick, fully operational start of the robot arm compared to a custom development of all robot software, a lower entry barrier for software developers, as well as the reuse of existing libraries. While not every line of code can be tested with this design, most of the software has already proven its functionality through daily execution on multiple robot systems.<|reference_end|>
arxiv
@article{mühlbauer2024software, title={Software for the SpaceDREAM Robotic Arm}, author={Maximilian Mühlbauer, Maxime Chalon, Maximilian Ulmer, Alin Albu-Schäffer}, journal={arXiv preprint arXiv:2409.17562}, year={2024}, archivePrefix={arXiv}, eprint={2409.17562}, primaryClass={cs.RO} }
mühlbauer2024software
arxiv-662174
2409.17564
General Compression Framework for Efficient Transformer Object Tracking
<|reference_start|>General Compression Framework for Efficient Transformer Object Tracking: Transformer-based trackers have established a dominant role in the field of visual object tracking. While these trackers exhibit promising performance, their deployment on resource-constrained devices remains challenging due to inefficiencies. To improve the inference efficiency and reduce the computation cost, prior approaches have aimed to either design lightweight trackers or distill knowledge from larger teacher models into more compact student trackers. However, these solutions often sacrifice accuracy for speed. Thus, we propose a general model compression framework for efficient transformer object tracking, named CompressTracker, to compress a pre-trained tracking model into a lightweight tracker with minimal performance degradation. Our approach features a novel stage division strategy that segments the transformer layers of the teacher model into distinct stages, enabling the student model to emulate each corresponding teacher stage more effectively. Additionally, we design a unique replacement training technique that involves randomly substituting specific stages in the student model with those from the teacher model, as opposed to training the student model in isolation. Replacement training enhances the student model's ability to replicate the teacher model's behavior. To further force the student model to emulate the teacher model, we incorporate prediction guidance and stage-wise feature mimicking to provide additional supervision during the teacher model's compression process. Our framework, CompressTracker, is structurally agnostic, making it compatible with any transformer architecture. We conduct a series of experiments to verify the effectiveness and generalizability of CompressTracker. Our CompressTracker-4 with 4 transformer layers, which is compressed from OSTrack, retains about 96% of the performance on LaSOT (66.1% AUC) while achieving a 2.17x speed-up.<|reference_end|>
arxiv
@article{hong2024general, title={General Compression Framework for Efficient Transformer Object Tracking}, author={Lingyi Hong, Jinglun Li, Xinyu Zhou, Shilin Yan, Pinxue Guo, Kaixun Jiang, Zhaoyu Chen, Shuyong Gao, Wei Zhang, Hong Lu, Wenqiang Zhang}, journal={arXiv preprint arXiv:2409.17564}, year={2024}, archivePrefix={arXiv}, eprint={2409.17564}, primaryClass={cs.CV} }
hong2024general
arxiv-662175
2409.17565
Pixel-Space Post-Training of Latent Diffusion Models
<|reference_start|>Pixel-Space Post-Training of Latent Diffusion Models: Latent diffusion models (LDMs) have made significant advancements in the field of image generation in recent years. One major advantage of LDMs is their ability to operate in a compressed latent space, allowing for more efficient training and deployment. However, despite these advantages, challenges with LDMs still remain. For example, it has been observed that LDMs often generate high-frequency details and complex compositions imperfectly. We hypothesize that one reason for these flaws is that all pre- and post-training of LDMs is done in latent space, which typically has $8 \times 8$ lower spatial resolution than the output images. To address this issue, we propose adding pixel-space supervision in the post-training process to better preserve high-frequency details. Experimentally, we show that adding a pixel-space objective significantly improves both supervised quality fine-tuning and preference-based post-training on state-of-the-art DiT transformer and U-Net diffusion models in both visual quality and visual flaw metrics, while maintaining the same text alignment quality.<|reference_end|>
arxiv
@article{zhang2024pixel-space, title={Pixel-Space Post-Training of Latent Diffusion Models}, author={Christina Zhang, Simran Motwani, Matthew Yu, Ji Hou, Felix Juefei-Xu, Sam Tsai, Peter Vajda, Zijian He, Jialiang Wang}, journal={arXiv preprint arXiv:2409.17565}, year={2024}, archivePrefix={arXiv}, eprint={2409.17565}, primaryClass={cs.CV cs.AI cs.LG} }
zhang2024pixel-space
arxiv-662176
2409.17566
Flexiffusion: Segment-wise Neural Architecture Search for Flexible Denoising Schedule
<|reference_start|>Flexiffusion: Segment-wise Neural Architecture Search for Flexible Denoising Schedule: Diffusion models are cutting-edge generative models adept at producing diverse, high-quality images. Despite their effectiveness, these models often require significant computational resources owing to their numerous sequential denoising steps and the significant inference cost of each step. Recently, Neural Architecture Search (NAS) techniques have been employed to automatically search for faster generation processes. However, NAS for diffusion is inherently time-consuming as it requires estimating thousands of diffusion models to search for the optimal one. In this paper, we introduce Flexiffusion, a novel training-free NAS paradigm designed to accelerate diffusion models by concurrently optimizing generation steps and network structures. Specifically, we partition the generation process into isometric step segments, each sequentially composed of a full step, multiple partial steps, and several null steps. The full step computes all network blocks, while the partial step involves part of the blocks, and the null step entails no computation. Flexiffusion autonomously explores flexible step combinations for each segment, substantially reducing search costs and enabling greater acceleration compared to the state-of-the-art (SOTA) method for diffusion models. Our searched models reported speedup factors of $2.6\times$ and $1.5\times$ for the original LDM-4-G and the SOTA, respectively. The factors for Stable Diffusion V1.5 and the SOTA are $5.1\times$ and $2.0\times$. We also verified the performance of Flexiffusion on multiple datasets, and positive experiment results indicate that Flexiffusion can effectively reduce redundancy in diffusion models.<|reference_end|>
arxiv
@article{huang2024flexiffusion:, title={Flexiffusion: Segment-wise Neural Architecture Search for Flexible Denoising Schedule}, author={Hongtao Huang, Xiaojun Chang and Lina Yao}, journal={arXiv preprint arXiv:2409.17566}, year={2024}, archivePrefix={arXiv}, eprint={2409.17566}, primaryClass={cs.CV} }
huang2024flexiffusion:
arxiv-662177
2409.17567
Derandomizing Multi-Distribution Learning
<|reference_start|>Derandomizing Multi-Distribution Learning: Multi-distribution or collaborative learning involves learning a single predictor that works well across multiple data distributions, using samples from each during training. Recent research on multi-distribution learning, focusing on binary loss and finite VC dimension classes, has shown near-optimal sample complexity that is achieved with oracle efficient algorithms. That is, these algorithms are computationally efficient given an efficient ERM for the class. Unlike in classical PAC learning, where the optimal sample complexity is achieved with deterministic predictors, current multi-distribution learning algorithms output randomized predictors. This raises the question: can these algorithms be derandomized to produce a deterministic predictor for multiple distributions? Through a reduction to discrepancy minimization, we show that derandomizing multi-distribution learning is computationally hard, even when ERM is computationally efficient. On the positive side, we identify a structural condition enabling an efficient black-box reduction, converting existing randomized multi-distribution predictors into deterministic ones.<|reference_end|>
arxiv
@article{larsen2024derandomizing, title={Derandomizing Multi-Distribution Learning}, author={Kasper Green Larsen, Omar Montasser, Nikita Zhivotovskiy}, journal={arXiv preprint arXiv:2409.17567}, year={2024}, archivePrefix={arXiv}, eprint={2409.17567}, primaryClass={cs.LG cs.CC cs.DS math.ST stat.TH} }
larsen2024derandomizing
arxiv-662178
2409.17568
Showing Many Labels in Multi-label Classification Models: An Empirical Study of Adversarial Examples
<|reference_start|>Showing Many Labels in Multi-label Classification Models: An Empirical Study of Adversarial Examples: With their rapid development, Deep Neural Networks (DNNs) have been applied in numerous fields. However, research indicates that DNNs are susceptible to adversarial examples, and this is equally true in the multi-label domain. To further investigate multi-label adversarial examples, we introduce a novel type of attack, termed "Showing Many Labels". The objective of this attack is to maximize the number of labels included in the classifier's prediction results. In our experiments, we select nine attack algorithms and evaluate their performance under "Showing Many Labels". Eight of the attack algorithms are adapted from the multi-class environment to the multi-label environment, while the remaining one is specifically designed for the multi-label environment. We choose ML-LIW and ML-GCN as target models and train them on four popular multi-label datasets: VOC2007, VOC2012, NUS-WIDE, and COCO. We record the success rate of each algorithm when it shows the expected number of labels in eight different scenarios. Experimental results indicate that under "Showing Many Labels", iterative attacks perform significantly better than one-step attacks. Moreover, it is possible to show all labels in the dataset.<|reference_end|>
arxiv
@article{liu2024showing, title={Showing Many Labels in Multi-label Classification Models: An Empirical Study of Adversarial Examples}, author={Yujiang Liu, Wenjian Luo, Zhijian Chen, Muhammad Luqman Naseem}, journal={arXiv preprint arXiv:2409.17568}, year={2024}, archivePrefix={arXiv}, eprint={2409.17568}, primaryClass={cs.AI} }
liu2024showing
arxiv-662179
2409.17572
Dr GPT in Campus Counseling: Understanding Higher Education Students' Opinions on LLM-assisted Mental Health Services
<|reference_start|>Dr GPT in Campus Counseling: Understanding Higher Education Students' Opinions on LLM-assisted Mental Health Services: In response to the increasing mental health challenges faced by college students, we sought to understand their perspectives on how AI applications, particularly Large Language Models (LLMs), can be leveraged to enhance their mental well-being. Through pilot interviews with ten diverse students, we explored their opinions on the use of LLMs across five fictional scenarios: General Information Inquiry, Initial Screening, Reshaping Patient-Expert Dynamics, Long-term Care, and Follow-up Care. Our findings revealed that students' acceptance of LLMs varied by scenario, with participants highlighting both potential benefits, such as proactive engagement and personalized follow-up care, and concerns, including limitations in training data and emotional support. These insights inform how AI technology should be designed and implemented to effectively support and enhance students' mental well-being, particularly in scenarios where LLMs can complement traditional methods, while maintaining empathy and respecting individual preferences.<|reference_end|>
arxiv
@article{zhang2024dr., title={Dr. GPT in Campus Counseling: Understanding Higher Education Students' Opinions on LLM-assisted Mental Health Services}, author={Owen Xingjian Zhang, Shuyao Zhou, Jiayi Geng, Yuhan Liu, Sunny Xun Liu}, journal={arXiv preprint arXiv:2409.17572}, year={2024}, archivePrefix={arXiv}, eprint={2409.17572}, primaryClass={cs.HC cs.AI} }
zhang2024dr.
arxiv-662180
2409.17576
ID$^3$: Identity-Preserving-yet-Diversified Diffusion Models for Synthetic Face Recognition
<|reference_start|>ID$^3$: Identity-Preserving-yet-Diversified Diffusion Models for Synthetic Face Recognition: Synthetic face recognition (SFR) aims to generate synthetic face datasets that mimic the distribution of real face data, which allows for training face recognition models in a privacy-preserving manner. Despite the remarkable potential of diffusion models in image generation, current diffusion-based SFR models struggle with generalization to real-world faces. To address this limitation, we outline three key objectives for SFR: (1) promoting diversity across identities (inter-class diversity), (2) ensuring diversity within each identity by injecting various facial attributes (intra-class diversity), and (3) maintaining identity consistency within each identity group (intra-class identity preservation). Inspired by these goals, we introduce a diffusion-fueled SFR model termed $\text{ID}^3$. $\text{ID}^3$ employs an ID-preserving loss to generate diverse yet identity-consistent facial appearances. Theoretically, we show that minimizing this loss is equivalent to maximizing the lower bound of an adjusted conditional log-likelihood over ID-preserving data. This equivalence motivates an ID-preserving sampling algorithm, which operates over an adjusted gradient vector field, enabling the generation of fake face recognition datasets that approximate the distribution of real-world faces. Extensive experiments across five challenging benchmarks validate the advantages of $\text{ID}^3$.<|reference_end|>
arxiv
@article{li2024id$^3$:, title={ID$^3$: Identity-Preserving-yet-Diversified Diffusion Models for Synthetic Face Recognition}, author={Shen Li, Jianqing Xu, Jiaying Wu, Miao Xiong, Ailin Deng, Jiazhen Ji, Yuge Huang, Wenjie Feng, Shouhong Ding, Bryan Hooi}, journal={arXiv preprint arXiv:2409.17576}, year={2024}, archivePrefix={arXiv}, eprint={2409.17576}, primaryClass={cs.CV} }
li2024id$^3$:
arxiv-662181
2409.17577
Leveraging Annotator Disagreement for Text Classification
<|reference_start|>Leveraging Annotator Disagreement for Text Classification: It is common practice in text classification to only use one majority label for model training even if a dataset has been annotated by multiple annotators. Doing so can remove valuable nuances and diverse perspectives inherent in the annotators' assessments. This paper proposes and compares three different strategies to leverage annotator disagreement for text classification: a probability-based multi-label method, an ensemble system, and instruction tuning. All three approaches are evaluated on the tasks of hate speech and abusive conversation detection, which inherently entail a high degree of subjectivity. Moreover, to evaluate the effectiveness of embracing annotation disagreements for model training, we conduct an online survey that compares the performance of the multi-label model against a baseline model, which is trained with the majority label. The results show that in hate speech detection, the multi-label method outperforms the other two approaches, while in abusive conversation detection, instruction tuning achieves the best performance. The results of the survey also show that the outputs from the multi-label models are considered a better representation of the texts than the single-label model.<|reference_end|>
arxiv
@article{xu2024leveraging, title={Leveraging Annotator Disagreement for Text Classification}, author={Jin Xu and Mariët Theune and Daniel Braun}, journal={arXiv preprint arXiv:2409.17577}, year={2024}, archivePrefix={arXiv}, eprint={2409.17577}, primaryClass={cs.CL} }
xu2024leveraging
arxiv-662182
2409.17578
Expanding Perspectives on Data Privacy: Insights from Rural Togo
<|reference_start|>Expanding Perspectives on Data Privacy: Insights from Rural Togo: Passively collected "big" data sources are increasingly used to inform critical development policy decisions in low- and middle-income countries. While prior work highlights how such approaches may reveal sensitive information, enable surveillance, and centralize power, less is known about the corresponding privacy concerns, hopes, and fears of the people directly impacted by these policies -- people sometimes referred to as experiential experts. To understand the perspectives of experiential experts, we conducted semi-structured interviews with people living in rural villages in Togo shortly after an entirely digital cash transfer program was launched that used machine learning and mobile phone metadata to determine program eligibility. This paper documents participants' privacy concerns surrounding the introduction of big data approaches in development policy. We find that the privacy concerns of our experiential experts differ from those raised by privacy and development domain experts. To facilitate a more robust and constructive account of privacy, we discuss implications for policies and designs that take seriously the privacy concerns raised by both experiential experts and domain experts.<|reference_end|>
arxiv
@article{kahn2024expanding, title={Expanding Perspectives on Data Privacy: Insights from Rural Togo}, author={Zoe Kahn, Meyebinesso Farida Carelle Pere, Emily Aiken, Nitin Kohli, Joshua E. Blumenstock}, journal={arXiv preprint arXiv:2409.17578}, year={2024}, archivePrefix={arXiv}, eprint={2409.17578}, primaryClass={cs.HC} }
kahn2024expanding
arxiv-662183
2409.17580
Enhancing Structured-Data Retrieval with GraphRAG: Soccer Data Case Study
<|reference_start|>Enhancing Structured-Data Retrieval with GraphRAG: Soccer Data Case Study: Extracting meaningful insights from large and complex datasets poses significant challenges, particularly in ensuring the accuracy and relevance of retrieved information. Traditional data retrieval methods such as sequential search and index-based retrieval often fail when handling intricate and interconnected data structures, resulting in incomplete or misleading outputs. To overcome these limitations, we introduce Structured-GraphRAG, a versatile framework designed to enhance information retrieval across structured datasets in natural language queries. Structured-GraphRAG utilizes multiple knowledge graphs, which represent data in a structured format and capture complex relationships between entities, enabling a more nuanced and comprehensive retrieval of information. This graph-based approach reduces the risk of errors in language model outputs by grounding responses in a structured format, thereby enhancing the reliability of results. We demonstrate the effectiveness of Structured-GraphRAG by comparing its performance with that of a recently published method using traditional retrieval-augmented generation. Our findings show that Structured-GraphRAG significantly improves query processing efficiency and reduces response times. While our case study focuses on soccer data, the framework's design is broadly applicable, offering a powerful tool for data analysis and enhancing language model applications across various structured domains.<|reference_end|>
arxiv
@article{sepasdar2024enhancing, title={Enhancing Structured-Data Retrieval with GraphRAG: Soccer Data Case Study}, author={Zahra Sepasdar, Sushant Gautam, Cise Midoglu, Michael A. Riegler, and Pål Halvorsen}, journal={arXiv preprint arXiv:2409.17580}, year={2024}, archivePrefix={arXiv}, eprint={2409.17580}, primaryClass={cs.IR cs.AI cs.DB} }
sepasdar2024enhancing
arxiv-662184
2409.17581
A Scalable Data-Driven Framework for Systematic Analysis of SEC 10-K Filings Using Large Language Models
<|reference_start|>A Scalable Data-Driven Framework for Systematic Analysis of SEC 10-K Filings Using Large Language Models: The number of companies listed on the NYSE has been growing exponentially, creating a significant challenge for market analysts, traders, and stockholders who must monitor and assess the performance and strategic shifts of a large number of companies regularly. There is an increasing need for a fast, cost-effective, and comprehensive method to evaluate the performance and detect and compare many companies' strategy changes efficiently. We propose a novel data-driven approach that leverages large language models (LLMs) to systematically analyze and rate the performance of companies based on their SEC 10-K filings. These filings, which provide detailed annual reports on a company's financial performance and strategic direction, serve as a rich source of data for evaluating various aspects of corporate health, including confidence, environmental sustainability, innovation, and workforce management. We also introduce an automated system for extracting and preprocessing 10-K filings. This system accurately identifies and segments the required sections as outlined by the SEC, while also isolating key textual content that contains critical information about the company. This curated data is then fed into Cohere's Command-R+ LLM to generate quantitative ratings across various performance metrics. These ratings are subsequently processed and visualized to provide actionable insights. The proposed scheme is then implemented on an interactive GUI as a no-code solution for running the data pipeline and creating the visualizations. The application showcases the rating results and provides year-on-year comparisons of company performance.<|reference_end|>
arxiv
@article{daimi2024a, title={A Scalable Data-Driven Framework for Systematic Analysis of SEC 10-K Filings Using Large Language Models}, author={Syed Affan Daimi, Asma Iqbal}, journal={arXiv preprint arXiv:2409.17581}, year={2024}, archivePrefix={arXiv}, eprint={2409.17581}, primaryClass={cs.AI} }
daimi2024a
arxiv-662185
2409.17582
Multiplicative Logit Adjustment Approximates Neural-Collapse-Aware Decision Boundary Adjustment
<|reference_start|>Multiplicative Logit Adjustment Approximates Neural-Collapse-Aware Decision Boundary Adjustment: Real-world data distributions are often highly skewed. This has spurred a growing body of research on long-tailed recognition, aimed at addressing the imbalance in training classification models. Among the methods studied, multiplicative logit adjustment (MLA) stands out as a simple and effective method. What theoretical foundation explains the effectiveness of this heuristic method? We provide a justification for the effectiveness of MLA with the following two-step process. First, we develop a theory that adjusts optimal decision boundaries by estimating feature spread on the basis of neural collapse. Second, we demonstrate that MLA approximates this optimal method. Additionally, through experiments on long-tailed datasets, we illustrate the practical usefulness of MLA under more realistic conditions. We also offer experimental insights to guide the tuning of MLA hyperparameters.<|reference_end|>
arxiv
@article{hasegawa2024multiplicative, title={Multiplicative Logit Adjustment Approximates Neural-Collapse-Aware Decision Boundary Adjustment}, author={Naoya Hasegawa, Issei Sato}, journal={arXiv preprint arXiv:2409.17582}, year={2024}, archivePrefix={arXiv}, eprint={2409.17582}, primaryClass={cs.LG} }
hasegawa2024multiplicative
arxiv-662186
2409.17583
Let the Quantum Creep In: Designing Quantum Neural Network Models by Gradually Swapping Out Classical Components
<|reference_start|>Let the Quantum Creep In: Designing Quantum Neural Network Models by Gradually Swapping Out Classical Components: Artificial Intelligence (AI), with its multiplier effect and wide applications in multiple areas, could potentially be an important application of quantum computing. Since modern AI systems are often built on neural networks, the design of quantum neural networks becomes a key challenge in integrating quantum computing into AI. To provide a more fine-grained characterisation of the impact of quantum components on the performance of neural networks, we propose a framework where classical neural network layers are gradually replaced by quantum layers that have the same type of input and output while keeping the flow of information between layers unchanged, different from most current research in quantum neural network, which favours an end-to-end quantum model. We start with a simple three-layer classical neural network without any normalisation layers or activation functions, and gradually change the classical layers to the corresponding quantum versions. We conduct numerical experiments on image classification datasets such as the MNIST, FashionMNIST and CIFAR-10 datasets to demonstrate the change of performance brought by the systematic introduction of quantum components. Through this framework, our research sheds new light on the design of future quantum neural network models where it could be more favourable to search for methods and frameworks that harness the advantages from both the classical and quantum worlds.<|reference_end|>
arxiv
@article{wang2024let, title={Let the Quantum Creep In: Designing Quantum Neural Network Models by Gradually Swapping Out Classical Components}, author={Peiyong Wang, Casey. R. Myers, Lloyd C. L. Hollenberg, Udaya Parampalli}, journal={arXiv preprint arXiv:2409.17583}, year={2024}, archivePrefix={arXiv}, eprint={2409.17583}, primaryClass={quant-ph cs.AI cs.CV cs.LG} }
wang2024let
arxiv-662187
2409.17587
Multimodal Banking Dataset: Understanding Client Needs through Event Sequences
<|reference_start|>Multimodal Banking Dataset: Understanding Client Needs through Event Sequences: Financial organizations collect a huge amount of data about clients that typically has a temporal (sequential) structure and is collected from various sources (modalities). Due to privacy issues, there are no large-scale open-source multimodal datasets of event sequences, which significantly limits the research in this area. In this paper, we present MBD, an industrial-scale, publicly available multimodal banking dataset that contains more than 1.5M corporate clients with several modalities: 950M bank transactions, 1B geo-position events, 5M embeddings of dialogues with technical support, and monthly aggregated purchases of four of the bank's products. All entries are properly anonymized from real proprietary bank data. Using this dataset, we introduce a novel benchmark with two business tasks: campaigning (purchase prediction in the next month) and matching of clients. We provide numerical results that demonstrate the superiority of our multi-modal baselines over single-modal techniques for each task. As a result, the proposed dataset can open new perspectives and facilitate the future development of practically important large-scale multimodal algorithms for event sequences. HuggingFace Link: https://huggingface.co/datasets/ai-lab/MBD Github Link: https://github.com/Dzhambo/MBD<|reference_end|>
arxiv
@article{dzhambulat2024multimodal, title={Multimodal Banking Dataset: Understanding Client Needs through Event Sequences}, author={Mollaev Dzhambulat, Alexander Kostin, Postnova Maria, Ivan Karpukhin, Ivan A Kireev, Gleb Gusev, Andrey Savchenko}, journal={arXiv preprint arXiv:2409.17587}, year={2024}, archivePrefix={arXiv}, eprint={2409.17587}, primaryClass={cs.LG cs.AI} }
dzhambulat2024multimodal
arxiv-662188
2409.17588
DualCoTs: Dual Chain-of-Thoughts Prompting for Sentiment Lexicon Expansion of Idioms
<|reference_start|>DualCoTs: Dual Chain-of-Thoughts Prompting for Sentiment Lexicon Expansion of Idioms: Idioms represent a ubiquitous vehicle for conveying sentiments in the realm of everyday discourse, rendering the nuanced analysis of idiom sentiment crucial for a comprehensive understanding of emotional expression within real-world texts. Nevertheless, the existing corpora dedicated to idiom sentiment analysis considerably limit research in text sentiment analysis. In this paper, we propose an innovative approach to automatically expand the sentiment lexicon for idioms, leveraging the capabilities of large language models through the application of Chain-of-Thought prompting. To demonstrate the effectiveness of this approach, we integrate multiple existing resources and construct an emotional idiom lexicon expansion dataset (called EmoIdiomE), which encompasses a comprehensive repository of Chinese and English idioms. Then we designed the Dual Chain-of-Thoughts (DualCoTs) method, which combines insights from linguistics and psycholinguistics, to demonstrate the effectiveness of using large models to automatically expand the sentiment lexicon for idioms. Experiments show that DualCoTs is effective in idioms sentiment lexicon expansion in both Chinese and English. For reproducibility, we will release the data and code upon acceptance.<|reference_end|>
arxiv
@article{niu2024dualcots:, title={DualCoTs: Dual Chain-of-Thoughts Prompting for Sentiment Lexicon Expansion of Idioms}, author={Fuqiang Niu, Minghuan Tan, Bowen Zhang, Min Yang and Ruifeng Xu}, journal={arXiv preprint arXiv:2409.17588}, year={2024}, archivePrefix={arXiv}, eprint={2409.17588}, primaryClass={cs.CL} }
niu2024dualcots:
arxiv-662189
2409.17589
Improving Fast Adversarial Training via Self-Knowledge Guidance
<|reference_start|>Improving Fast Adversarial Training via Self-Knowledge Guidance: Adversarial training has achieved remarkable advancements in defending against adversarial attacks. Among them, fast adversarial training (FAT) is gaining attention for its ability to achieve competitive robustness with fewer computing resources. Existing FAT methods typically employ a uniform strategy that optimizes all training data equally without considering the influence of different examples, which leads to an imbalanced optimization. However, this imbalance remains unexplored in the field of FAT. In this paper, we conduct a comprehensive study of the imbalance issue in FAT and observe an obvious disparity in performance across classes. This disparity can be characterized in terms of the alignment between clean and robust accuracy. Based on the analysis, we mainly attribute the observed misalignment and disparity to the imbalanced optimization in FAT, which motivates us to optimize different training data adaptively to enhance robustness. Specifically, we take disparity and misalignment into consideration. First, we introduce self-knowledge guided regularization, which assigns differentiated regularization weights to each class based on its training state, alleviating class disparity. Additionally, we propose self-knowledge guided label relaxation, which adjusts label relaxation according to the training accuracy, alleviating the misalignment and improving robustness. By combining these methods, we formulate the Self-Knowledge Guided FAT (SKG-FAT), leveraging naturally generated knowledge during training to enhance the adversarial robustness without compromising training efficiency. Extensive experiments on four standard datasets demonstrate that SKG-FAT improves robustness and preserves competitive clean accuracy, outperforming the state-of-the-art methods.<|reference_end|>
arxiv
@article{jiang2024improving, title={Improving Fast Adversarial Training via Self-Knowledge Guidance}, author={Chengze Jiang, Junkai Wang, Minjing Dong, Jie Gui, Xinli Shi, Yuan Cao, Yuan Yan Tang, James Tin-Yau Kwok}, journal={arXiv preprint arXiv:2409.17589}, year={2024}, archivePrefix={arXiv}, eprint={2409.17589}, primaryClass={cs.CV cs.AI} }
jiang2024improving
arxiv-662190
2409.17591
Conjugate Bayesian Two-step Change Point Detection for Hawkes Process
<|reference_start|>Conjugate Bayesian Two-step Change Point Detection for Hawkes Process: The Bayesian two-step change point detection method is popular for the Hawkes process due to its simplicity and intuitiveness. However, the non-conjugacy between the point process likelihood and the prior requires most existing Bayesian two-step change point detection methods to rely on non-conjugate inference methods. These methods lack analytical expressions, leading to low computational efficiency and impeding timely change point detection. To address this issue, this work employs data augmentation to propose a conjugate Bayesian two-step change point detection method for the Hawkes process, which proves to be more accurate and efficient. Extensive experiments on both synthetic and real data demonstrate the superior effectiveness and efficiency of our method compared to baseline methods. Additionally, we conduct ablation studies to explore the robustness of our method concerning various hyperparameters. Our code is publicly available at https://github.com/Aurora2050/CoBay-CPD.<|reference_end|>
arxiv
@article{zhang2024conjugate, title={Conjugate Bayesian Two-step Change Point Detection for Hawkes Process}, author={Zeyue Zhang, Xiaoling Lu, Feng Zhou}, journal={arXiv preprint arXiv:2409.17591}, year={2024}, archivePrefix={arXiv}, eprint={2409.17591}, primaryClass={stat.ML cs.LG} }
zhang2024conjugate
arxiv-662191
2409.17592
Deep Manifold Part 1: Anatomy of Neural Network Manifold
<|reference_start|>Deep Manifold Part 1: Anatomy of Neural Network Manifold: Based on the numerical manifold method principle, we develop a mathematical framework for a neural network manifold, Deep Manifold, and discover that neural networks: 1) are numerical computations combining forward and inverse problems; 2) have near-infinite degrees of freedom; 3) have exponential learning capacity with depth; 4) have self-progressing boundary conditions; 5) have a hidden training bottleneck. We also define two concepts, neural network learning space and deep manifold space, and introduce two further concepts, neural network intrinsic pathway and fixed point. We raise three fundamental questions: 1) What defines training completion; 2) Where is the deep learning convergence point (neural network fixed point); 3) How important is the token timestamp in training data, given that negative time is critical in inverse problems.<|reference_end|>
arxiv
@article{ma2024deep, title={Deep Manifold Part 1: Anatomy of Neural Network Manifold}, author={Max Y. Ma and Gen-Hua Shi}, journal={arXiv preprint arXiv:2409.17592}, year={2024}, archivePrefix={arXiv}, eprint={2409.17592}, primaryClass={cs.LG cs.AI} }
ma2024deep
arxiv-662192
2409.17593
AsIf: Asset Interface Analysis of Industrial Automation Devices
<|reference_start|>AsIf: Asset Interface Analysis of Industrial Automation Devices: As Industry 4.0 and the Industrial Internet of Things continue to advance, industrial control systems are increasingly adopting IT solutions, including communication standards and protocols. As these systems become more decentralized and interconnected, a critical need for enhanced security measures arises. Threat modeling is traditionally performed in structured brainstorming sessions involving domain and security experts. Such sessions, however, often fail to provide an exhaustive identification of assets and interfaces due to the lack of a systematic approach. This is a major issue, as it leads to poor threat modeling, resulting in insufficient mitigation strategies and, lastly, a flawed security architecture. We propose a method for the analysis of assets in industrial systems, with special focus on physical threats. Inspired by the ISO/OSI reference model, a systematic approach is introduced to help identify and classify asset interfaces. This results in an enriched system model of the asset, offering a comprehensive overview visually represented as an interface tree, thereby laying the foundation for subsequent threat modeling steps. To demonstrate the proposed method, the results of its application to a programmable logic controller (PLC) are presented. In support of this, a study involving a group of 12 security experts was conducted. Additionally, the study offers valuable insights into the experts' general perspectives and workflows on threat modeling.<|reference_end|>
arxiv
@article{rosenstatter2024asif:, title={AsIf: Asset Interface Analysis of Industrial Automation Devices}, author={Thomas Rosenstatter, Christian Sch\"afer, Olaf Sa{\ss}nick, Stefan Huber}, journal={arXiv preprint arXiv:2409.17593}, year={2024}, archivePrefix={arXiv}, eprint={2409.17593}, primaryClass={cs.CR} }
rosenstatter2024asif:
arxiv-662193
2409.17596
Subjective and Objective Quality-of-Experience Evaluation Study for Live Video Streaming
<|reference_start|>Subjective and Objective Quality-of-Experience Evaluation Study for Live Video Streaming: In recent years, live video streaming has gained widespread popularity across various social media platforms. Quality of experience (QoE), which reflects end-users' satisfaction and overall experience, plays a critical role for media service providers to optimize large-scale live compression and transmission strategies to achieve a perceptually optimal rate-distortion trade-off. Although many QoE metrics for video-on-demand (VoD) have been proposed, there remain significant challenges in developing QoE metrics for live video streaming. To bridge this gap, we conduct a comprehensive study of subjective and objective QoE evaluations for live video streaming. For the subjective QoE study, we introduce the first live video streaming QoE dataset, TaoLive QoE, which consists of $42$ source videos collected from real live broadcasts and $1,155$ corresponding distorted ones degraded due to a variety of streaming distortions, including conventional streaming distortions such as compression and stalling, as well as live streaming-specific distortions like frame skipping and variable frame rate. Subsequently, a human study was conducted to derive subjective QoE scores of videos in the TaoLive QoE dataset. For the objective QoE study, we benchmark existing QoE models on the TaoLive QoE dataset as well as publicly available QoE datasets for VoD scenarios, highlighting that current models struggle to accurately assess video QoE, particularly for live content. Hence, we propose an end-to-end QoE evaluation model, Tao-QoE, which integrates multi-scale semantic features and optical flow-based motion features to predict a retrospective QoE score, eliminating reliance on statistical quality of service (QoS) features.<|reference_end|>
arxiv
@article{zhu2024subjective, title={Subjective and Objective Quality-of-Experience Evaluation Study for Live Video Streaming}, author={Zehao Zhu, Wei Sun, Jun Jia, Wei Wu, Sibin Deng, Kai Li, Ying Chen, Xiongkuo Min, Jia Wang, Guangtao Zhai}, journal={arXiv preprint arXiv:2409.17596}, year={2024}, archivePrefix={arXiv}, eprint={2409.17596}, primaryClass={cs.MM cs.AI eess.IV} }
zhu2024subjective
arxiv-662194
2409.17597
Unifying Dimensions: A Linear Adaptive Approach to Lightweight Image Super-Resolution
<|reference_start|>Unifying Dimensions: A Linear Adaptive Approach to Lightweight Image Super-Resolution: Window-based transformers have demonstrated outstanding performance in super-resolution tasks due to their adaptive modeling capabilities through local self-attention (SA). However, they exhibit higher computational complexity and inference latency than convolutional neural networks. In this paper, we first identify that the adaptability of the Transformers is derived from their adaptive spatial aggregation and advanced structural design, while their high latency results from the computational costs and memory layout transformations associated with the local SA. To simulate this aggregation approach, we propose an effective convolution-based linear focal separable attention (FSA), allowing for long-range dynamic modeling with linear complexity. Additionally, we introduce an effective dual-branch structure combined with an ultra-lightweight information exchange module (IEM) to enhance the aggregation of information by the Token Mixer. Finally, with respect to the structure, we modify the existing spatial-gate-based feedforward neural networks by incorporating a self-gate mechanism to preserve high-dimensional channel information, enabling the modeling of more complex relationships. With these advancements, we construct a convolution-based Transformer framework named the linear adaptive mixer network (LAMNet). Extensive experiments demonstrate that LAMNet achieves better performance than existing SA-based Transformer methods while maintaining the computational efficiency of convolutional neural networks, which can achieve a \(3\times\) speedup of inference time. The code will be publicly available at: https://github.com/zononhzy/LAMNet.<|reference_end|>
arxiv
@article{hu2024unifying, title={Unifying Dimensions: A Linear Adaptive Approach to Lightweight Image Super-Resolution}, author={Zhenyu Hu and Wanjie Sun}, journal={arXiv preprint arXiv:2409.17597}, year={2024}, archivePrefix={arXiv}, eprint={2409.17597}, primaryClass={cs.CV} }
hu2024unifying
arxiv-662195
2409.17598
Freeze and Learn: Continual Learning with Selective Freezing for Speech Deepfake Detection
<|reference_start|>Freeze and Learn: Continual Learning with Selective Freezing for Speech Deepfake Detection: In speech deepfake detection, one of the critical aspects is developing detectors able to generalize on unseen data and distinguish fake signals across different datasets. Common approaches to this challenge involve incorporating diverse data into the training process or fine-tuning models on unseen datasets. However, these solutions can be computationally demanding and may lead to the loss of knowledge acquired from previously learned data. Continual learning techniques offer a potential solution to this problem, allowing the models to learn from unseen data without losing what they have already learned. Still, the optimal way to apply these algorithms to speech deepfake detection models remains unclear. In this paper we address this aspect and investigate whether, when retraining a speech deepfake detector, it is more effective to apply continual learning across the entire model or to update only some of its layers while freezing others. Our findings, validated across multiple models, indicate that the most effective approach among those analyzed is to update only the weights of the initial layers, which are responsible for processing the input features of the detector.<|reference_end|>
arxiv
@article{salvi2024freeze, title={Freeze and Learn: Continual Learning with Selective Freezing for Speech Deepfake Detection}, author={Davide Salvi, Viola Negroni, Luca Bondi, Paolo Bestagini, Stefano Tubaro}, journal={arXiv preprint arXiv:2409.17598}, year={2024}, archivePrefix={arXiv}, eprint={2409.17598}, primaryClass={cs.SD eess.AS} }
salvi2024freeze
arxiv-662196
2409.17600
Attitudes and perceived effectiveness among first-time online instructors during Covid-19
<|reference_start|>Attitudes and perceived effectiveness among first-time online instructors during Covid-19: Online teaching has expanded access to education, offering flexibility compared to traditional face-to-face instruction. While early research has explored online teaching, it is important to understand the perspective of instructors who conducted their first online classes during the Covid-19 pandemic. This study focuses on instructors teaching online for the first time, regardless of whether they volunteered. Surveys were conducted when universities transitioned from in-person to online instruction in April 2020, with a follow-up survey after their first online teaching semester. The study investigated instructors' expectations of class success before their first online teaching experience. Using Bayesian modeling, we analyzed how these expectations varied based on instructors' characteristics (self-efficacy in online teaching, technological proficiency, and acceptance of technology) and course attributes (subject area, class size, and instructional design). Results showed that instructors' self-efficacy significantly impacted their expectations of success, while smaller class sizes were associated with lower expectations. Interestingly, factors like prior use of technology platforms and classroom design did not contribute significantly to expectations. The study offers practical recommendations to support online teaching. To improve self-efficacy, instructors should collaborate with colleagues and familiarize themselves with online platforms. Universities should provide workshops or training to enhance teaching skills. In small interactive classes, nonverbal communication should be emphasized, and institutions should establish support teams and feedback mechanisms to ensure quality and effectiveness in online education.<|reference_end|>
arxiv
@article{zhang2024attitudes, title={Attitudes and perceived effectiveness among first-time online instructors during Covid-19}, author={Owen Xingjian Zhang}, journal={arXiv preprint arXiv:2409.17600}, year={2024}, archivePrefix={arXiv}, eprint={2409.17600}, primaryClass={cs.HC} }
zhang2024attitudes
arxiv-662197
2409.17601
TA-Cleaner: A Fine-grained Text Alignment Backdoor Defense Strategy for Multimodal Contrastive Learning
<|reference_start|>TA-Cleaner: A Fine-grained Text Alignment Backdoor Defense Strategy for Multimodal Contrastive Learning: Pre-trained large models for multimodal contrastive learning, such as CLIP, have been widely recognized in the industry as highly susceptible to data-poisoned backdoor attacks. This poses significant risks to downstream model training. In response to such potential threats, finetuning offers a simpler and more efficient defense choice compared to retraining large models with augmented data. In the supervised learning domain, fine-tuning defense strategies can achieve excellent defense performance. However, in the unsupervised and semi-supervised domain, we find that when CLIP faces some complex attack techniques, the existing fine-tuning defense strategy, CleanCLIP, has some limitations on defense performance. The synonym substitution of its text-augmentation is insufficient to enhance the text feature space. To compensate for this weakness, we improve it by proposing a fine-grained \textbf{T}ext \textbf{A}lignment \textbf{C}leaner (TA-Cleaner) to cut off feature connections of backdoor triggers. We randomly select a few samples for positive and negative subtext generation at each epoch of CleanCLIP, and align the subtexts to the images to strengthen the text self-supervision. We evaluate the effectiveness of our TA-Cleaner against six attack algorithms and conduct comprehensive zero-shot classification tests on ImageNet1K. Our experimental results demonstrate that TA-Cleaner achieves state-of-the-art defensiveness among finetuning-based defense techniques. Even when faced with the novel attack technique BadCLIP, our TA-Cleaner outperforms CleanCLIP by reducing the ASR of Top-1 and Top-10 by 52.02\% and 63.88\%, respectively.<|reference_end|>
arxiv
@article{xun2024ta-cleaner:, title={TA-Cleaner: A Fine-grained Text Alignment Backdoor Defense Strategy for Multimodal Contrastive Learning}, author={Yuan Xun, Siyuan Liang, Xiaojun Jia, Xinwei Liu, Xiaochun Cao}, journal={arXiv preprint arXiv:2409.17601}, year={2024}, archivePrefix={arXiv}, eprint={2409.17601}, primaryClass={cs.CV cs.AI} }
xun2024ta-cleaner:
arxiv-662198
2409.17602
Open Digital Rights Enforcement Framework (ODRE): from descriptive to enforceable policies
<|reference_start|>Open Digital Rights Enforcement Framework (ODRE): from descriptive to enforceable policies: From centralised platforms to decentralised ecosystems like Data Spaces, sharing data has become a paramount challenge. For this reason, the definition of data usage policies has become crucial in these domains, highlighting the necessity of effective policy enforcement mechanisms. The Open Digital Rights Language (ODRL) is a W3C standard ontology designed to describe data usage policies; however, it lacks built-in enforcement capabilities, limiting its practical application. This paper introduces the Open Digital Rights Enforcement (ODRE) framework, whose goal is to provide ODRL with enforcement capabilities. The ODRE framework proposes a novel approach to express ODRL policies that integrates the descriptive ontology terms of ODRL with other languages that allow behaviour specification, such as dynamic data handling or function evaluation. The framework includes an enforcement algorithm for ODRL policies and two open-source implementations in Python and Java. The ODRE framework is also designed to support future extensions of ODRL to specific domain scenarios. In addition, current limitations of ODRE and ODRL, as well as open challenges, are reported. Finally, to demonstrate the enforcement capabilities of the implementations, their performance, and their extensibility features, several experiments have been carried out with positive results.<|reference_end|>
arxiv
@article{cimmino2024open, title={Open Digital Rights Enforcement Framework (ODRE): from descriptive to enforceable policies}, author={Andrea Cimmino, Juan Cano-Benito, Ra\'ul Garc\'ia-Castro}, journal={arXiv preprint arXiv:2409.17602}, year={2024}, archivePrefix={arXiv}, eprint={2409.17602}, primaryClass={cs.CR cs.AI} }
cimmino2024open
arxiv-662199
2409.17603
Deep CLAS: Deep Contextual Listen, Attend and Spell
<|reference_start|>Deep CLAS: Deep Contextual Listen, Attend and Spell: Contextual-LAS (CLAS) has been shown to be effective in improving Automatic Speech Recognition (ASR) of rare words. It relies on phrase-level contextual modeling and attention-based relevance scoring without an explicit contextual constraint, which leads to insufficient use of contextual information. In this work, we propose deep CLAS to make better use of contextual information. We introduce a bias loss that forces the model to focus on contextual information. The query of the bias attention is also enriched to improve the accuracy of the bias attention score. To obtain fine-grained contextual information, we replace phrase-level encoding with character-level encoding and encode contextual information with a Conformer rather than an LSTM. Moreover, we directly use the bias attention score to correct the output probability distribution of the model. Experiments are conducted on the public AISHELL-1 and AISHELL-NER datasets. On AISHELL-1, compared to CLAS baselines, deep CLAS obtains a 65.78% relative recall increase and a 53.49% relative F1-score increase in the named entity recognition scenario.<|reference_end|>
arxiv
@article{xiong2024deep, title={Deep CLAS: Deep Contextual Listen, Attend and Spell}, author={Shifu Xiong, Mengzhi Wang, Genshun Wan, Hang Chen, Jianqing Gao, Lirong Dai}, journal={arXiv preprint arXiv:2409.17603}, year={2024}, archivePrefix={arXiv}, eprint={2409.17603}, primaryClass={cs.CL cs.SD eess.AS} }
xiong2024deep
arxiv-662200
2409.17604
RmGPT: Rotating Machinery Generative Pretrained Model
<|reference_start|>RmGPT: Rotating Machinery Generative Pretrained Model: In industry, the reliability of rotating machinery is critical for production efficiency and safety. Current methods of Prognostics and Health Management (PHM) often rely on task-specific models, which face significant challenges in handling diverse datasets with varying signal characteristics, fault modes and operating conditions. Inspired by advancements in generative pretrained models, we propose RmGPT, a unified model for diagnosis and prognosis tasks. RmGPT introduces a novel token-based framework, incorporating Signal Tokens, Prompt Tokens, Time-Frequency Task Tokens and Fault Tokens to handle heterogeneous data within a unified model architecture. We leverage self-supervised learning for robust feature extraction and introduce a next signal token prediction pretraining strategy, alongside efficient prompt learning for task-specific adaptation. Extensive experiments demonstrate that RmGPT significantly outperforms state-of-the-art algorithms, achieving near-perfect accuracy in diagnosis tasks and exceptionally low errors in prognosis tasks. Notably, RmGPT excels in few-shot learning scenarios, achieving 92% accuracy in 16-class one-shot experiments, highlighting its adaptability and robustness. This work establishes RmGPT as a powerful PHM foundation model for rotating machinery, advancing the scalability and generalizability of PHM solutions.<|reference_end|>
arxiv
@article{wang2024rmgpt:, title={RmGPT: Rotating Machinery Generative Pretrained Model}, author={Yilin Wang, Yifei Yu, Kong Sun, Peixuan Lei, Yuxuan Zhang, Enrico Zio, Aiguo Xia, Yuanxiang Li}, journal={arXiv preprint arXiv:2409.17604}, year={2024}, archivePrefix={arXiv}, eprint={2409.17604}, primaryClass={cs.LG} }
wang2024rmgpt: