Dataset schema (field: type, observed length range):
corpus_id: string, 7-12 characters
paper_id: string, 9-16 characters
title: string, 1-261 characters
abstract: string, 70-4.02k characters
source: string, 1 distinct value
bibtex: string, 208-20.9k characters
citation_key: string, 6-100 characters
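The rows below follow this schema, one record per paper. As a minimal sketch of how such records might be loaded and inspected (assuming the preview corresponds to a Hugging Face dataset; the dataset path `user/arxiv-citations` is a hypothetical placeholder, not a confirmed identifier):

```python
# Minimal sketch: load the dataset and inspect one record per the schema above.
# The dataset path is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("user/arxiv-citations", split="train")  # hypothetical path

# Each row carries the seven fields listed in the schema.
row = ds[0]
print(row["corpus_id"], row["paper_id"])
print(row["title"])
print(row["citation_key"])

# The abstract field is wrapped in <|reference_start|> ... <|reference_end|> markers
# and begins with the title followed by a colon; strip the markers to get plain text.
abstract = row["abstract"]
abstract = abstract.removeprefix("<|reference_start|>").removesuffix("<|reference_end|>")
print(abstract[:200])
```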
arxiv-666701
2410.05354
Over-the-Air Federated Learning in Cell-Free MIMO with Long-term Power Constraint
<|reference_start|>Over-the-Air Federated Learning in Cell-Free MIMO with Long-term Power Constraint: Wireless networks supporting artificial intelligence have gained significant attention, with Over-the-Air Federated Learning emerging as a key application due to its unique transmission and distributed computing characteristics. This paper derives error bounds for Over-the-Air Federated Learning in a Cell-free MIMO system and formulates an optimization problem to minimize optimality gap via joint optimization of power control and beamforming. We introduce the MOP-LOFPC algorithm, which employs Lyapunov optimization to decouple long-term constraints across rounds while requiring only causal channel state information. Experimental results demonstrate that MOP-LOFPC achieves a better and more flexible trade-off between the model's training loss and adherence to long-term power constraints compared to existing baselines.<|reference_end|>
arxiv
@article{wang2024over-the-air, title={Over-the-Air Federated Learning in Cell-Free MIMO with Long-term Power Constraint}, author={Yifan Wang, Cheng Zhang, Yuanndon Zhuang, Mingzeng Dai, Haiming Wang, Yongming Huang}, journal={arXiv preprint arXiv:2410.05354}, year={2024}, archivePrefix={arXiv}, eprint={2410.05354}, primaryClass={cs.LG cs.AI} }
wang2024over-the-air
arxiv-666702
2410.05355
Falcon Mamba: The First Competitive Attention-free 7B Language Model
<|reference_start|>Falcon Mamba: The First Competitive Attention-free 7B Language Model: In this technical report, we present Falcon Mamba 7B, a new base large language model based on the novel Mamba architecture. Falcon Mamba 7B is trained on 5.8 trillion tokens with carefully selected data mixtures. As a pure Mamba-based model, Falcon Mamba 7B surpasses leading open-weight models based on Transformers, such as Mistral 7B, Llama3.1 8B, and Falcon2 11B. It is on par with Gemma 7B and outperforms models with different architecture designs, such as RecurrentGemma 9B and RWKV-v6 Finch 7B/14B. Currently, Falcon Mamba 7B is the best-performing Mamba model in the literature at this scale, surpassing both existing Mamba and hybrid Mamba-Transformer models, according to the Open LLM Leaderboard. Due to its architecture, Falcon Mamba 7B is significantly faster at inference and requires substantially less memory for long sequence generation. Despite recent studies suggesting that hybrid Mamba-Transformer models outperform pure architecture designs, we demonstrate that even the pure Mamba design can achieve similar, or even superior results compared to the Transformer and hybrid designs. We make the weights of our implementation of Falcon Mamba 7B publicly available on https://huggingface.co/tiiuae/falcon-mamba-7b, under a permissive license.<|reference_end|>
arxiv
@article{zuo2024falcon, title={Falcon Mamba: The First Competitive Attention-free 7B Language Model}, author={Jingwei Zuo, Maksim Velikanov, Dhia Eddine Rhaiem, Ilyas Chahed, Younes Belkada, Guillaume Kunsch and Hakim Hacid}, journal={arXiv preprint arXiv:2410.05355}, year={2024}, archivePrefix={arXiv}, eprint={2410.05355}, primaryClass={cs.CL cs.AI} }
zuo2024falcon
arxiv-666703
2410.05356
BSG4Bot: Efficient Bot Detection based on Biased Heterogeneous Subgraphs
<|reference_start|>BSG4Bot: Efficient Bot Detection based on Biased Heterogeneous Subgraphs: The detection of malicious social bots has become a crucial task, as bots can be easily deployed and manipulated to spread disinformation, promote conspiracy messages, and more. Most existing approaches utilize graph neural networks (GNNs) to capture both user profile and structural features, achieving promising progress. However, they still face limitations, including expensive training on the large underlying graph, performance degradation when the assumption of similar neighborhood patterns preferred by GNNs is not satisfied, and the dynamic features of bots in a highly adversarial context. Motivated by these limitations, this paper proposes a method named BSG4Bot, with the intuition that training GNNs on Biased SubGraphs can improve both performance and time/space efficiency in bot detection. Specifically, BSG4Bot first pre-trains a classifier on node features efficiently to define node similarities, and constructs biased subgraphs by combining the similarities computed by the pre-trained classifier with the node importances computed by Personalized PageRank (PPR scores). BSG4Bot then introduces a heterogeneous GNN over the constructed subgraphs to detect bots effectively and efficiently. The relatively stable features, including content category and temporal activity features, are explored and incorporated into BSG4Bot after preliminary verification on sample data. Extensive experimental studies show that BSG4Bot outperforms state-of-the-art bot detection methods while requiring only about 1/5 of the training time.<|reference_end|>
arxiv
@article{miao2024bsg4bot:, title={BSG4Bot: Efficient Bot Detection based on Biased Heterogeneous Subgraphs}, author={Hao Miao, Zida Liu, and Jun Gao}, journal={arXiv preprint arXiv:2410.05356}, year={2024}, archivePrefix={arXiv}, eprint={2410.05356}, primaryClass={cs.LG cs.AI} }
miao2024bsg4bot:
arxiv-666704
2410.05357
Model-GLUE: Democratized LLM Scaling for A Large Model Zoo in the Wild
<|reference_start|>Model-GLUE: Democratized LLM Scaling for A Large Model Zoo in the Wild: As Large Language Models (LLMs) excel across tasks and specialized domains, scaling LLMs based on existing models has garnered significant attention, but it faces the challenge of decreasing performance when combining disparate models. Various techniques have been proposed for the aggregation of pre-trained LLMs, including model merging, Mixture-of-Experts, and stacking. Despite their merits, a comprehensive comparison of these techniques and their synergistic application to a diverse model zoo has yet to be adequately addressed. In light of this research gap, this paper introduces Model-GLUE, a holistic LLM scaling guideline. First, our work starts with benchmarking existing LLM scaling techniques, especially selective merging and variants of mixture. Utilizing the insights from the benchmark results, we formulate a strategy for the selection and aggregation of a heterogeneous model zoo characterized by different architectures and initializations. Our methodology involves clustering mergeable models, selecting an optimal merging strategy, and integrating the clusters through a model mixture. Finally, as evidenced by our experiments on a diverse Llama-2-based model zoo, Model-GLUE shows an average performance enhancement of 5.61%, achieved without additional training. Code is available at: https://github.com/Model-GLUE/Model-GLUE.<|reference_end|>
arxiv
@article{zhao2024model-glue:, title={Model-GLUE: Democratized LLM Scaling for A Large Model Zoo in the Wild}, author={Xinyu Zhao, Guoheng Sun, Ruisi Cai, Yukun Zhou, Pingzhi Li, Peihao Wang, Bowen Tan, Yexiao He, Li Chen, Yi Liang, Beidi Chen, Binhang Yuan, Hongyi Wang, Ang Li, Zhangyang Wang, Tianlong Chen}, journal={arXiv preprint arXiv:2410.05357}, year={2024}, archivePrefix={arXiv}, eprint={2410.05357}, primaryClass={cs.LG cs.AI cs.CL} }
zhao2024model-glue:
arxiv-666705
2410.05358
A Predictive and Optimization Approach for Enhanced Urban Mobility Using Spatiotemporal Data
<|reference_start|>A Predictive and Optimization Approach for Enhanced Urban Mobility Using Spatiotemporal Data: In modern urban centers, effective transportation management poses a significant challenge, with traffic jams and inconsistent travel durations greatly affecting commuters and logistics operations. This study introduces a novel method for enhancing urban mobility by combining machine learning algorithms with live traffic information. We developed predictive models for journey time and congestion analysis using data from New York City's yellow taxi trips. The research employed a spatiotemporal analysis framework to identify traffic trends and implemented real-time route optimization using the GraphHopper API. This system determines the most efficient paths based on current conditions, adapting to changes in traffic flow. The methodology utilizes Spark MLlib for predictive modeling and Spark Streaming for processing data in real-time. By integrating historical data analysis with current traffic inputs, our system shows notable enhancements in both travel time forecasts and route optimization, demonstrating its potential for widespread application in major urban areas. This research contributes to ongoing efforts aimed at reducing urban congestion and improving transportation efficiency through advanced data-driven methods.<|reference_end|>
arxiv
@article{mishra2024a, title={A Predictive and Optimization Approach for Enhanced Urban Mobility Using Spatiotemporal Data}, author={Shambhavi Mishra, T. Satyanarayana Murthy}, journal={arXiv preprint arXiv:2410.05358}, year={2024}, archivePrefix={arXiv}, eprint={2410.05358}, primaryClass={cs.LG} }
mishra2024a
arxiv-666706
2410.05359
Interactive Event Sifting using Bayesian Graph Neural Networks
<|reference_start|>Interactive Event Sifting using Bayesian Graph Neural Networks: Forensic analysts often use social media imagery and texts to understand important events. A primary challenge is the initial sifting of irrelevant posts. This work introduces an interactive process for training an event-centric, learning-based multimodal classification model that automates sanitization. We propose a method based on Bayesian Graph Neural Networks (BGNNs) and evaluate active learning and pseudo-labeling formulations to reduce the number of posts the analyst must manually annotate. Our results indicate that BGNNs are useful for social-media data sifting for forensics investigations of events of interest, the value of active learning and pseudo-labeling varies based on the setting, and incorporating unlabelled data from other events improves performance.<|reference_end|>
arxiv
@article{nascimento2024interactive, title={Interactive Event Sifting using Bayesian Graph Neural Networks}, author={José Nascimento, Nathan Jacobs and Anderson Rocha}, journal={arXiv preprint arXiv:2410.05359}, year={2024}, archivePrefix={arXiv}, eprint={2410.05359}, primaryClass={cs.LG cs.SI} }
nascimento2024interactive
arxiv-666707
2410.05361
RespLLM: Unifying Audio and Text with Multimodal LLMs for Generalized Respiratory Health Prediction
<|reference_start|>RespLLM: Unifying Audio and Text with Multimodal LLMs for Generalized Respiratory Health Prediction: The high incidence and mortality rates associated with respiratory diseases underscore the importance of early screening. Machine learning models can automate clinical consultations and auscultation, offering vital support in this area. However, the data involved, spanning demographics, medical history, symptoms, and respiratory audio, are heterogeneous and complex. Existing approaches are insufficient and lack generalizability, as they typically rely on limited training data, basic fusion techniques, and task-specific models. In this paper, we propose RespLLM, a novel multimodal large language model (LLM) framework that unifies text and audio representations for respiratory health prediction. RespLLM leverages the extensive prior knowledge of pretrained LLMs and enables effective audio-text fusion through cross-modal attentions. Instruction tuning is employed to integrate diverse data from multiple sources, ensuring generalizability and versatility of the model. Experiments on five real-world datasets demonstrate that RespLLM outperforms leading baselines by an average of 4.6% on trained tasks, 7.9% on unseen datasets, and facilitates zero-shot predictions for new tasks. Our work lays the foundation for multimodal models that can perceive, listen to, and understand heterogeneous data, paving the way for scalable respiratory health diagnosis.<|reference_end|>
arxiv
@article{zhang2024respllm:, title={RespLLM: Unifying Audio and Text with Multimodal LLMs for Generalized Respiratory Health Prediction}, author={Yuwei Zhang, Tong Xia, Aaqib Saeed, Cecilia Mascolo}, journal={arXiv preprint arXiv:2410.05361}, year={2024}, archivePrefix={arXiv}, eprint={2410.05361}, primaryClass={cs.LG cs.AI cs.SD eess.AS} }
zhang2024respllm:
arxiv-666708
2410.05362
LLMs Are In-Context Reinforcement Learners
<|reference_start|>LLMs Are In-Context Reinforcement Learners: Large Language Models (LLMs) can learn new tasks through in-context supervised learning (i.e., ICL). This work studies if this ability extends to in-context reinforcement learning (ICRL), where models are not given gold labels in context, but only their past predictions and rewards. We show that a naive application of ICRL fails miserably, and identify the root cause as a fundamental deficiency at exploration, which leads to quick model degeneration. We propose an algorithm to address this deficiency by increasing test-time compute, as well as a compute-bound approximation. We use several challenging classification tasks to empirically show that our ICRL algorithms lead to effective learning from rewards alone, and analyze the characteristics of this ability and our methods. Overall, our results reveal remarkable ICRL abilities in LLMs.<|reference_end|>
arxiv
@article{monea2024llms, title={LLMs Are In-Context Reinforcement Learners}, author={Giovanni Monea, Antoine Bosselut, Kianté Brantley, Yoav Artzi}, journal={arXiv preprint arXiv:2410.05362}, year={2024}, archivePrefix={arXiv}, eprint={2410.05362}, primaryClass={cs.CL cs.AI cs.LG} }
monea2024llms
arxiv-666709
2410.05363
Towards World Simulator: Crafting Physical Commonsense-Based Benchmark for Video Generation
<|reference_start|>Towards World Simulator: Crafting Physical Commonsense-Based Benchmark for Video Generation: Text-to-video (T2V) models like Sora have made significant strides in visualizing complex prompts, which is increasingly viewed as a promising path towards constructing the universal world simulator. Cognitive psychologists believe that the foundation for achieving this goal is the ability to understand intuitive physics. However, the capacity of these models to accurately represent intuitive physics remains largely unexplored. To bridge this gap, we introduce PhyGenBench, a comprehensive \textbf{Phy}sics \textbf{Gen}eration \textbf{Ben}chmark designed to evaluate physical commonsense correctness in T2V generation. PhyGenBench comprises 160 carefully crafted prompts across 27 distinct physical laws, spanning four fundamental domains, which can comprehensively assess models' understanding of physical commonsense. Alongside PhyGenBench, we propose a novel evaluation framework called PhyGenEval. This framework employs a hierarchical evaluation structure utilizing appropriate advanced vision-language models and large language models to assess physical commonsense. Through PhyGenBench and PhyGenEval, we can conduct large-scale automated assessments of T2V models' understanding of physical commonsense, which align closely with human feedback. Our evaluation results and in-depth analysis demonstrate that current models struggle to generate videos that comply with physical commonsense. Moreover, simply scaling up models or employing prompt engineering techniques is insufficient to fully address the challenges presented by PhyGenBench (e.g., dynamic scenarios). We hope this study will inspire the community to prioritize the learning of physical commonsense in these models beyond entertainment applications. We will release the data and code at https://github.com/OpenGVLab/PhyGenBench<|reference_end|>
arxiv
@article{meng2024towards, title={Towards World Simulator: Crafting Physical Commonsense-Based Benchmark for Video Generation}, author={Fanqing Meng, Jiaqi Liao, Xinyu Tan, Wenqi Shao, Quanfeng Lu, Kaipeng Zhang, Yu Cheng, Dianqi Li, Yu Qiao, Ping Luo}, journal={arXiv preprint arXiv:2410.05363}, year={2024}, archivePrefix={arXiv}, eprint={2410.05363}, primaryClass={cs.CV} }
meng2024towards
arxiv-666710
2410.05364
Diffusion Model Predictive Control
<|reference_start|>Diffusion Model Predictive Control: We propose Diffusion Model Predictive Control (D-MPC), a novel MPC approach that learns a multi-step action proposal and a multi-step dynamics model, both using diffusion models, and combines them for use in online MPC. On the popular D4RL benchmark, we show performance that is significantly better than existing model-based offline planning methods using MPC and competitive with state-of-the-art (SOTA) model-based and model-free reinforcement learning methods. We additionally illustrate D-MPC's ability to optimize novel reward functions at run time and adapt to novel dynamics, and highlight its advantages compared to existing diffusion-based planning baselines.<|reference_end|>
arxiv
@article{zhou2024diffusion, title={Diffusion Model Predictive Control}, author={Guangyao Zhou, Sivaramakrishnan Swaminathan, Rajkumar Vasudeva Raju, J. Swaroop Guntupalli, Wolfgang Lehrach, Joseph Ortiz, Antoine Dedieu, Miguel Lázaro-Gredilla, Kevin Murphy}, journal={arXiv preprint arXiv:2410.05364}, year={2024}, archivePrefix={arXiv}, eprint={2410.05364}, primaryClass={cs.LG cs.AI} }
zhou2024diffusion
arxiv-666711
2410.05401
Post-hoc Study of Climate Microtargeting on Social Media Ads with LLMs: Thematic Insights and Fairness Evaluation
<|reference_start|>Post-hoc Study of Climate Microtargeting on Social Media Ads with LLMs: Thematic Insights and Fairness Evaluation: Climate change communication on social media increasingly employs microtargeting strategies to effectively reach and influence specific demographic groups. This study presents a post-hoc analysis of microtargeting practices within climate campaigns by leveraging large language models (LLMs) to examine Facebook advertisements. Our analysis focuses on two key aspects: demographic targeting and fairness. We evaluate the ability of LLMs to accurately predict the intended demographic targets, such as gender and age group, achieving an overall accuracy of 88.55%. Furthermore, we instruct the LLMs to generate explanations for their classifications, providing transparent reasoning behind each decision. These explanations reveal the specific thematic elements used to engage different demographic segments, highlighting distinct strategies tailored to various audiences. Our findings show that young adults are primarily targeted through messages emphasizing activism and environmental consciousness, while women are engaged through themes related to caregiving roles and social advocacy. In addition to evaluating the effectiveness of LLMs in detecting microtargeted messaging, we conduct a comprehensive fairness analysis to identify potential biases in model predictions. Our findings indicate that while LLMs perform well overall, certain biases exist, particularly in the classification of senior citizens and male audiences. By showcasing the efficacy of LLMs in dissecting and explaining targeted communication strategies and by highlighting fairness concerns, this study provides a valuable framework for future research aimed at enhancing transparency, accountability, and inclusivity in social media-driven climate campaigns.<|reference_end|>
arxiv
@article{islam2024post-hoc, title={Post-hoc Study of Climate Microtargeting on Social Media Ads with LLMs: Thematic Insights and Fairness Evaluation}, author={Tunazzina Islam, Dan Goldwasser}, journal={arXiv preprint arXiv:2410.05401}, year={2024}, archivePrefix={arXiv}, eprint={2410.05401}, primaryClass={cs.CL cs.AI cs.CY cs.SI} }
islam2024post-hoc
arxiv-666712
2410.05403
Deep learning-based Visual Measurement Extraction within an Adaptive Digital Twin Framework from Limited Data Using Transfer Learning
<|reference_start|>Deep learning-based Visual Measurement Extraction within an Adaptive Digital Twin Framework from Limited Data Using Transfer Learning: Digital Twins technology is revolutionizing decision-making in scientific research by integrating models and simulations with real-time data. Unlike traditional Structural Health Monitoring methods, which rely on computationally intensive Digital Image Correlation and have limitations in real-time data integration, this research proposes a novel approach using Artificial Intelligence. Specifically, Convolutional Neural Networks are employed to analyze structural behaviors in real-time by correlating Digital Image Correlation speckle pattern images with deformation fields. Initially focusing on two-dimensional speckle patterns, the research extends to three-dimensional applications using stereo-paired images for comprehensive deformation analysis. This method overcomes computational challenges by utilizing a mix of synthetically generated and authentic speckle pattern images for training the Convolutional Neural Networks. The models are designed to be robust and versatile, offering a promising alternative to traditional measurement techniques and paving the way for advanced applications in three-dimensional modeling. This advancement signifies a shift towards more efficient and dynamic structural health monitoring by leveraging the power of Artificial Intelligence for real-time simulation and analysis.<|reference_end|>
arxiv
@article{dizaji2024deep, title={Deep learning-based Visual Measurement Extraction within an Adaptive Digital Twin Framework from Limited Data Using Transfer Learning}, author={Mehrdad Shafiei Dizaji}, journal={arXiv preprint arXiv:2410.05403}, year={2024}, archivePrefix={arXiv}, eprint={2410.05403}, primaryClass={cs.CV eess.IV} }
dizaji2024deep
arxiv-666713
2410.05405
SharpSLAM: 3D Object-Oriented Visual SLAM with Deblurring for Agile Drones
<|reference_start|>SharpSLAM: 3D Object-Oriented Visual SLAM with Deblurring for Agile Drones: The paper focuses on the algorithm for improving the quality of 3D reconstruction and segmentation in DSP-SLAM by enhancing the RGB image quality. The SharpSLAM algorithm we developed aims to decrease the influence of high dynamic motion on visual object-oriented SLAM through image deblurring, improving all aspects of object-oriented SLAM, including localization, mapping, and object reconstruction. The experimental results revealed noticeable improvement in object detection quality, with the F-score increasing from 82.9% to 86.2% due to the higher number of features and corresponding map points. The RMSE of the signed distance function has also decreased from 17.2 cm to 15.4 cm. Furthermore, our solution has enhanced object positioning, with an increase in the IoU from 74.5% to 75.7%. The SharpSLAM algorithm has the potential to greatly improve the quality of 3D reconstruction and segmentation in DSP-SLAM and to impact a wide range of fields, including robotics, autonomous vehicles, and augmented reality.<|reference_end|>
arxiv
@article{davletshin2024sharpslam:, title={SharpSLAM: 3D Object-Oriented Visual SLAM with Deblurring for Agile Drones}, author={Denis Davletshin, Iana Zhura, Vladislav Cheremnykh, Mikhail Rybiyanov, Aleksey Fedoseev, Dzmitry Tsetserukou}, journal={arXiv preprint arXiv:2410.05405}, year={2024}, archivePrefix={arXiv}, eprint={2410.05405}, primaryClass={cs.RO} }
davletshin2024sharpslam:
arxiv-666714
2410.05406
Synthesizing Interpretable Control Policies through Large Language Model Guided Search
<|reference_start|>Synthesizing Interpretable Control Policies through Large Language Model Guided Search: The combination of Large Language Models (LLMs), systematic evaluation, and evolutionary algorithms has enabled breakthroughs in combinatorial optimization and scientific discovery. We propose to extend this powerful combination to the control of dynamical systems, generating interpretable control policies capable of complex behaviors. With our novel method, we represent control policies as programs in standard languages like Python. We evaluate candidate controllers in simulation and evolve them using a pre-trained LLM. Unlike conventional learning-based control techniques, which rely on black box neural networks to encode control policies, our approach enhances transparency and interpretability. We still take advantage of the power of large AI models, but leverage it at the policy design phase, ensuring that all system components remain interpretable and easily verifiable at runtime. Additionally, the use of standard programming languages makes it straightforward for humans to finetune or adapt the controllers based on their expertise and intuition. We illustrate our method through its application to the synthesis of an interpretable control policy for the pendulum swing-up and the ball in cup tasks. We make the code available at https://github.com/muellerlab/synthesizing_interpretable_control_policies.git<|reference_end|>
arxiv
@article{bosio2024synthesizing, title={Synthesizing Interpretable Control Policies through Large Language Model Guided Search}, author={Carlo Bosio and Mark W. Mueller}, journal={arXiv preprint arXiv:2410.05406}, year={2024}, archivePrefix={arXiv}, eprint={2410.05406}, primaryClass={cs.AI cs.SY eess.SY} }
bosio2024synthesizing
arxiv-666715
2410.05407
Improving Predictor Reliability with Selective Recalibration
<|reference_start|>Improving Predictor Reliability with Selective Recalibration: A reliable deep learning system should be able to accurately express its confidence with respect to its predictions, a quality known as calibration. One of the most effective ways to produce reliable confidence estimates with a pre-trained model is by applying a post-hoc recalibration method. Popular recalibration methods like temperature scaling are typically fit on a small amount of data and work in the model's output space, as opposed to the more expressive feature embedding space, and thus usually have only one or a handful of parameters. However, the target distribution to which they are applied is often complex and difficult to fit well with such a function. To this end we propose \textit{selective recalibration}, where a selection model learns to reject some user-chosen proportion of the data in order to allow the recalibrator to focus on regions of the input space that can be well-captured by such a model. We provide theoretical analysis to motivate our algorithm, and test our method through comprehensive experiments on difficult medical imaging and zero-shot classification tasks. Our results show that selective recalibration consistently leads to significantly lower calibration error than a wide range of selection and recalibration baselines.<|reference_end|>
arxiv
@article{zollo2024improving, title={Improving Predictor Reliability with Selective Recalibration}, author={Thomas P. Zollo, Zhun Deng, Jake C. Snell, Toniann Pitassi, Richard Zemel}, journal={arXiv preprint arXiv:2410.05407}, year={2024}, archivePrefix={arXiv}, eprint={2410.05407}, primaryClass={cs.LG cs.AI} }
zollo2024improving
arxiv-666716
2410.05409
An Efficient Method for Solving Lane Emden Equation using Legendre Neural Network
<|reference_start|>An Efficient Method for Solving Lane Emden Equation using Legendre Neural Network: The aim of this manuscript is to address non-linear differential equations of the second-order Lane-Emden type using the shifted Legendre neural network (SLNN) method. Here all the equations are classified as singular initial value problems. To manage the singularity challenge, we employ an artificial neural network method. The approach utilizes a single-layer neural network, where the hidden layer is omitted by enlarging the input using shifted Legendre polynomials. We apply a feed-forward neural network model along with the principle of error back-propagation. The effectiveness of the Legendre Neural Network model is demonstrated on Lane-Emden equations.<|reference_end|>
arxiv
@article{patel2024an, title={An Efficient Method for Solving Lane Emden Equation using Legendre Neural Network}, author={Vijay Kumar Patel, Vivek Sharma, Nitin Kumar, Anoop Tiwari}, journal={arXiv preprint arXiv:2410.05409}, year={2024}, archivePrefix={arXiv}, eprint={2410.05409}, primaryClass={math.NA cs.NA} }
patel2024an
arxiv-666717
2410.05410
Enhanced Super-Resolution Training via Mimicked Alignment for Real-World Scenes
<|reference_start|>Enhanced Super-Resolution Training via Mimicked Alignment for Real-World Scenes: Image super-resolution methods have made significant strides with deep learning techniques and ample training data. However, they face challenges due to inherent misalignment between low-resolution (LR) and high-resolution (HR) pairs in real-world datasets. In this study, we propose a novel plug-and-play module designed to mitigate these misalignment issues by aligning LR inputs with HR images during training. Specifically, our approach involves mimicking a novel LR sample that aligns with HR while preserving the degradation characteristics of the original LR samples. This module seamlessly integrates with any SR model, enhancing robustness against misalignment. Importantly, it can be easily removed during inference, therefore without introducing any parameters on the conventional SR models. We comprehensively evaluate our method on synthetic and real-world datasets, demonstrating its effectiveness across a spectrum of SR models, including traditional CNNs and state-of-the-art Transformers. The source codes will be publicly made available at https://github.com/omarAlezaby/Mimicked_Ali .<|reference_end|>
arxiv
@article{elezabi2024enhanced, title={Enhanced Super-Resolution Training via Mimicked Alignment for Real-World Scenes}, author={Omar Elezabi, Zongwei Wu, Radu Timofte}, journal={arXiv preprint arXiv:2410.05410}, year={2024}, archivePrefix={arXiv}, eprint={2410.05410}, primaryClass={cs.CV eess.IV} }
elezabi2024enhanced
arxiv-666718
2410.05411
Constructing and Masking Preference Profile with LLMs for Filtering Discomforting Recommendation
<|reference_start|>Constructing and Masking Preference Profile with LLMs for Filtering Discomforting Recommendation: Personalized algorithms can inadvertently expose users to discomforting recommendations, potentially triggering negative consequences. The subjectivity of discomfort and the black-box nature of these algorithms make it challenging to effectively identify and filter such content. To address this, we first conducted a formative study to understand users' practices and expectations regarding discomforting recommendation filtering. Then, we designed a Large Language Model (LLM)-based tool named DiscomfortFilter, which constructs an editable preference profile for a user and helps the user express filtering needs through conversation to mask discomforting preferences within the profile. Based on the edited profile, DiscomfortFilter facilitates the discomforting recommendations filtering in a plug-and-play manner, maintaining flexibility and transparency. The constructed preference profile improves LLM reasoning and simplifies user alignment, enabling a 3.8B open-source LLM to rival top commercial models in an offline proxy task. A one-week user study with 24 participants demonstrated the effectiveness of DiscomfortFilter, while also highlighting its potential impact on platform recommendation outcomes. We conclude by discussing the ongoing challenges, highlighting its relevance to broader research, assessing stakeholder impact, and outlining future research directions.<|reference_end|>
arxiv
@article{liu2024constructing, title={Constructing and Masking Preference Profile with LLMs for Filtering Discomforting Recommendation}, author={Jiahao Liu, YiYang Shao, Peng Zhang, Dongsheng Li, Hansu Gu, Chao Chen, Longzhi Du, Tun Lu, and Ning Gu}, journal={arXiv preprint arXiv:2410.05411}, year={2024}, archivePrefix={arXiv}, eprint={2410.05411}, primaryClass={cs.IR cs.HC} }
liu2024constructing
arxiv-666719
2410.05412
Robust Matrix Completion with Deterministic Sampling via Convex Optimization
<|reference_start|>Robust Matrix Completion with Deterministic Sampling via Convex Optimization: This paper deals with the problem of robust matrix completion -- retrieving a low-rank matrix and a sparse matrix from the compressed counterpart of their superposition. Though this may seem to be a settled issue, we point out that the compressed matrix in our case is sampled in a deterministic pattern instead of the random patterns on which existing studies depend. In fact, deterministic sampling is much more hardware-friendly than random sampling. The limited resources on many platforms leave deterministic sampling as the only choice for sensing a matrix, making it important to investigate robust matrix completion with deterministic patterns. In this spirit, this paper proposes the \textit{restricted approximate $\infty$-isometry property} and proves that, if a \textit{low-rank} and \textit{incoherent} square matrix and a certain deterministic sampling pattern satisfy this property and two existing conditions called \textit{isomerism} and \textit{relative well-conditionedness}, then exact recovery from the sampled counterpart grossly corrupted by a small fraction of outliers via convex optimization happens with very high probability.<|reference_end|>
arxiv
@article{wang2024robust, title={Robust Matrix Completion with Deterministic Sampling via Convex Optimization}, author={Yinjian Wang}, journal={arXiv preprint arXiv:2410.05412}, year={2024}, archivePrefix={arXiv}, eprint={2410.05412}, primaryClass={cs.IT math.IT} }
wang2024robust
arxiv-666720
2410.05413
Implicitly Learned Neural Phase Functions for Basis-Free Point Spread Function Engineering
<|reference_start|>Implicitly Learned Neural Phase Functions for Basis-Free Point Spread Function Engineering: Point spread function (PSF) engineering is vital for precisely controlling the focus of light in computational imaging, with applications in neural imaging, fluorescence microscopy, and biophotonics. The PSF is derived from the magnitude of the Fourier transform of a phase function, making the construction of the phase function given the PSF (PSF engineering) an ill-posed inverse problem. Traditional PSF engineering methods rely on physical basis functions, limiting their ability to generalize across the range of PSFs required for imaging tasks. We introduce a novel approach leveraging implicit neural representations that significantly outperforms existing pixel-wise optimization methods in phase function quality.<|reference_end|>
arxiv
@article{valouev2024implicitly, title={Implicitly Learned Neural Phase Functions for Basis-Free Point Spread Function Engineering}, author={Aleksey Valouev}, journal={arXiv preprint arXiv:2410.05413}, year={2024}, archivePrefix={arXiv}, eprint={2410.05413}, primaryClass={physics.optics cs.CV} }
valouev2024implicitly
arxiv-666721
2410.05414
Positive bias makes tensor-network contraction tractable
<|reference_start|>Positive bias makes tensor-network contraction tractable: Tensor network contraction is a powerful computational tool in quantum many-body physics, quantum information and quantum chemistry. The complexity of contracting a tensor network is thought to mainly depend on its entanglement properties, as reflected by the Schmidt rank across bipartite cuts. Here, we study how the complexity of tensor-network contraction depends on a different notion of quantumness, namely, the sign structure of its entries. We tackle this question rigorously by investigating the complexity of contracting tensor networks whose entries have a positive bias. We show that for intermediate bond dimension d>~n, a small positive mean value >~1/d of the tensor entries already dramatically decreases the computational complexity of approximately contracting random tensor networks, enabling a quasi-polynomial time algorithm for arbitrary 1/poly(n) multiplicative approximation. At the same time exactly contracting such tensor networks remains #P-hard, like for the zero-mean case [HHEG20]. The mean value 1/d matches the phase transition point observed in [CJHS24]. Our proof makes use of Barvinok's method for approximate counting and the technique of mapping random instances to statistical mechanical models. We further consider the worst-case complexity of approximate contraction of positive tensor networks, where all entries are non-negative. We first give a simple proof showing that a multiplicative approximation with error exponentially close to one is at least StoqMA-hard. We then show that when considering additive error in the matrix 1-norm, the contraction of positive tensor network is BPP-Complete. This result compares to Arad and Landau's [AL10] result, which shows that for general tensor networks, approximate contraction up to matrix 2-norm additive error is BQP-Complete.<|reference_end|>
arxiv
@article{jiang2024positive, title={Positive bias makes tensor-network contraction tractable}, author={Jiaqing Jiang, Jielun Chen, Norbert Schuch, Dominik Hangleiter}, journal={arXiv preprint arXiv:2410.05414}, year={2024}, archivePrefix={arXiv}, eprint={2410.05414}, primaryClass={quant-ph cs.CC cs.DS} }
jiang2024positive
arxiv-666722
2410.05416
Haste Makes Waste: A Simple Approach for Scaling Graph Neural Networks
<|reference_start|>Haste Makes Waste: A Simple Approach for Scaling Graph Neural Networks: Graph neural networks (GNNs) have demonstrated remarkable success in graph representation learning, and various sampling approaches have been proposed to scale GNNs to applications with large-scale graphs. A class of promising GNN training algorithms takes advantage of historical embeddings to reduce the computation and memory cost while maintaining the model expressiveness of GNNs. However, they incur significant computation bias due to the stale feature history. In this paper, we provide a comprehensive analysis of their staleness and inferior performance on large-scale problems. Motivated by our discoveries, we propose a simple yet highly effective training algorithm (REST) to effectively reduce feature staleness, which leads to significantly improved performance and convergence across varying batch sizes. The proposed algorithm seamlessly integrates with existing solutions, boasting easy implementation, while comprehensive experiments underscore its superior performance and efficiency on large-scale benchmarks. Specifically, our improvements to state-of-the-art historical embedding methods result in a 2.7% and 3.6% performance enhancement on the ogbn-papers100M and ogbn-products datasets respectively, accompanied by notably accelerated convergence.<|reference_end|>
arxiv
@article{xue2024haste, title={Haste Makes Waste: A Simple Approach for Scaling Graph Neural Networks}, author={Rui Xue, Tong Zhao, Neil Shah, Xiaorui Liu}, journal={arXiv preprint arXiv:2410.05416}, year={2024}, archivePrefix={arXiv}, eprint={2410.05416}, primaryClass={cs.LG} }
xue2024haste
arxiv-666723
2410.05417
STOP! Camera Spoofing via the in-Vehicle IP Network
<|reference_start|>STOP! Camera Spoofing via the in-Vehicle IP Network: Autonomous driving and advanced driver assistance systems (ADAS) rely on cameras to control the driving. In many prior approaches an attacker aiming to stop the vehicle had to send messages on the specialized and better-defended CAN bus. We suggest an easier alternative: manipulate the IP-based network communication between the camera and the ADAS logic, inject fake images of stop signs or red lights into the video stream, and let the ADAS stop the car safely. We created an attack tool that successfully exploits the GigE Vision protocol. Then we analyze two classes of passive anomaly detectors to identify such attacks: protocol-based detectors and video-based detectors. We implemented multiple detectors of both classes and evaluated them on data collected from our test vehicle and also on data from the public BDD corpus. Our results show that such detectors are effective against naive adversaries, but sophisticated adversaries can evade detection. Finally, we propose a novel class of active defense mechanisms that randomly adjust camera parameters during the video transmission, and verify that the received images obey the requested adjustments. Within this class we focus on a specific implementation, the width-varying defense, which randomly modifies the width of every frame. Beyond its function as an anomaly detector, this defense is also a protective measure against certain attacks: by distorting injected image patches it prevents their recognition by the ADAS logic. We demonstrate the effectiveness of the width-varying defense through theoretical analysis and by an extensive evaluation of several types of attack in a wide range of realistic road driving conditions. The best the attack was able to achieve against this defense was injecting a stop sign for a duration of 0.2 seconds, with a success probability of 0.2%, whereas stopping a vehicle requires about 2.5 seconds.<|reference_end|>
arxiv
@article{peri2024stop!, title={STOP! Camera Spoofing via the in-Vehicle IP Network}, author={Dror Peri, Avishai Wool}, journal={arXiv preprint arXiv:2410.05417}, year={2024}, archivePrefix={arXiv}, eprint={2410.05417}, primaryClass={cs.CR} }
peri2024stop!
arxiv-666724
2410.05419
Refining Counterfactual Explanations With Joint-Distribution-Informed Shapley Towards Actionable Minimality
<|reference_start|>Refining Counterfactual Explanations With Joint-Distribution-Informed Shapley Towards Actionable Minimality: Counterfactual explanations (CE) identify data points that closely resemble the observed data but produce different machine learning (ML) model outputs, offering critical insights into model decisions. Despite the diverse scenarios, goals and tasks to which they are tailored, existing CE methods often lack actionable efficiency because of unnecessary feature changes included within the explanations that are presented to users and stakeholders. We address this problem by proposing a method that minimizes the required feature changes while maintaining the validity of CE, without imposing restrictions on models or CE algorithms, whether instance- or group-based. The key innovation lies in computing a joint distribution between observed and counterfactual data and leveraging it to inform Shapley values for feature attributions (FA). We demonstrate that optimal transport (OT) effectively derives this distribution, especially when the alignment between observed and counterfactual data is unclear in used CE methods. Additionally, a counterintuitive finding is uncovered: it may be misleading to rely on an exact alignment defined by the CE generation mechanism in conducting FA. Our proposed method is validated on extensive experiments across multiple datasets, showcasing its effectiveness in refining CE towards greater actionable efficiency.<|reference_end|>
arxiv
@article{you2024refining, title={Refining Counterfactual Explanations With Joint-Distribution-Informed Shapley Towards Actionable Minimality}, author={Lei You, Yijun Bian, Lele Cao}, journal={arXiv preprint arXiv:2410.05419}, year={2024}, archivePrefix={arXiv}, eprint={2410.05419}, primaryClass={cs.LG cs.AI stat.ME} }
you2024refining
arxiv-666725
2410.05423
Incorporating Talker Identity Aids With Improving Speech Recognition in Adversarial Environments
<|reference_start|>Incorporating Talker Identity Aids With Improving Speech Recognition in Adversarial Environments: Current state-of-the-art speech recognition models are trained to map acoustic signals into sub-lexical units. While these models demonstrate superior performance, they remain vulnerable to out-of-distribution conditions such as background noise and speech augmentations. In this work, we hypothesize that incorporating speaker representations during speech recognition can enhance model robustness to noise. We developed a transformer-based model that jointly performs speech recognition and speaker identification. Our model utilizes speech embeddings from Whisper and speaker embeddings from ECAPA-TDNN, which are processed jointly to perform both tasks. We show that the joint model performs comparably to Whisper under clean conditions. Notably, the joint model outperforms Whisper in high-noise environments, such as with 8-speaker babble background noise. Furthermore, our joint model excels in handling highly augmented speech, including sine-wave and noise-vocoded speech. Overall, these results suggest that integrating voice representations with speech recognition can lead to more robust models under adversarial conditions.<|reference_end|>
arxiv
@article{alavilli2024incorporating, title={Incorporating Talker Identity Aids With Improving Speech Recognition in Adversarial Environments}, author={Sagarika Alavilli, Annesya Banerjee, Gasser Elbanna, Annika Magaro}, journal={arXiv preprint arXiv:2410.05423}, year={2024}, archivePrefix={arXiv}, eprint={2410.05423}, primaryClass={cs.SD cs.AI eess.AS} }
alavilli2024incorporating
arxiv-666726
2410.05425
Designing a Classifier for Active Fire Detection from Multispectral Satellite Imagery Using Neural Architecture Search
<|reference_start|>Designing a Classifier for Active Fire Detection from Multispectral Satellite Imagery Using Neural Architecture Search: This paper showcases the use of a reinforcement learning-based Neural Architecture Search (NAS) agent to design a small neural network to perform active fire detection on multispectral satellite imagery. Specifically, we aim to design a neural network that can determine if a single multispectral pixel is part of a fire, and do so within the constraints of a Low Earth Orbit (LEO) nanosatellite with a limited power budget, to facilitate on-board processing of sensor data. In order to use reinforcement learning, a reward function is needed. We supply this reward function in the shape of a regression model that predicts the F1 score obtained by a particular architecture, following quantization to INT8 precision, from purely architectural features. This model is trained by collecting a random sample of neural network architectures, training these architectures, and collecting their classification performance statistics. Besides the F1 score, we also include the total number of trainable parameters in our reward function to limit the size of the designed model and ensure it fits within the resource constraints imposed by nanosatellite platforms. Finally, we deployed the best neural network to the Google Coral Micro Dev Board and evaluated its inference latency and power consumption. This neural network consists of 1,716 trainable parameters, takes on average 984 μs per inference, and consumes around 800 mW during inference. These results show that our reinforcement learning-based NAS approach can be successfully applied to novel problems not tackled before.<|reference_end|>
arxiv
@article{cassimon2024designing, title={Designing a Classifier for Active Fire Detection from Multispectral Satellite Imagery Using Neural Architecture Search}, author={Amber Cassimon, Phil Reiter, Siegfried Mercelis, Kevin Mets}, journal={arXiv preprint arXiv:2410.05425}, year={2024}, archivePrefix={arXiv}, eprint={2410.05425}, primaryClass={cs.LG} }
cassimon2024designing
arxiv-666727
2410.05429
Diffusion Imitation from Observation
<|reference_start|>Diffusion Imitation from Observation: Learning from observation (LfO) aims to imitate experts by learning from state-only demonstrations without requiring action labels. Existing adversarial imitation learning approaches learn a generator agent policy to produce state transitions that are indistinguishable to a discriminator that learns to classify agent and expert state transitions. Despite its simplicity in formulation, these methods are often sensitive to hyperparameters and brittle to train. Motivated by the recent success of diffusion models in generative modeling, we propose to integrate a diffusion model into the adversarial imitation learning from observation framework. Specifically, we employ a diffusion model to capture expert and agent transitions by generating the next state, given the current state. Then, we reformulate the learning objective to train the diffusion model as a binary classifier and use it to provide "realness" rewards for policy learning. Our proposed framework, Diffusion Imitation from Observation (DIFO), demonstrates superior performance in various continuous control domains, including navigation, locomotion, manipulation, and games. Project page: https://nturobotlearninglab.github.io/DIFO<|reference_end|>
arxiv
@article{huang2024diffusion, title={Diffusion Imitation from Observation}, author={Bo-Ruei Huang, Chun-Kai Yang, Chun-Mao Lai, Dai-Jie Wu, Shao-Hua Sun}, journal={arXiv preprint arXiv:2410.05429}, year={2024}, archivePrefix={arXiv}, eprint={2410.05429}, primaryClass={cs.LG} }
huang2024diffusion
arxiv-666728
2410.05430
A Functional Extension of Semi-Structured Networks
<|reference_start|>A Functional Extension of Semi-Structured Networks: Semi-structured networks (SSNs) merge the structures familiar from additive models with deep neural networks, allowing the modeling of interpretable partial feature effects while capturing higher-order non-linearities at the same time. A significant challenge in this integration is maintaining the interpretability of the additive model component. Inspired by large-scale biomechanics datasets, this paper explores extending SSNs to functional data. Existing methods in functional data analysis are promising but often not expressive enough to account for all interactions and non-linearities and do not scale well to large datasets. Although the SSN approach presents a compelling potential solution, its adaptation to functional data remains complex. In this work, we propose a functional SSN method that retains the advantageous properties of classical functional regression approaches while also improving scalability. Our numerical experiments demonstrate that this approach accurately recovers underlying signals, enhances predictive performance, and performs favorably compared to competing methods.<|reference_end|>
arxiv
@article{rügamer2024a, title={A Functional Extension of Semi-Structured Networks}, author={David Rügamer and Bernard X.W. Liew and Zainab Altai and Almond Stöcker}, journal={arXiv preprint arXiv:2410.05430}, year={2024}, archivePrefix={arXiv}, eprint={2410.05430}, primaryClass={cs.LG stat.AP stat.CO stat.ML} }
rügamer2024a
arxiv-666729
2410.05431
Continuous Ensemble Weather Forecasting with Diffusion models
<|reference_start|>Continuous Ensemble Weather Forecasting with Diffusion models: Weather forecasting has seen a shift in methods from numerical simulations to data-driven systems. While initial research in the area focused on deterministic forecasting, recent works have used diffusion models to produce skillful ensemble forecasts. These models are trained on a single forecasting step and rolled out autoregressively. However, they are computationally expensive and accumulate errors for high temporal resolution due to the many rollout steps. We address these limitations with Continuous Ensemble Forecasting, a novel and flexible method for sampling ensemble forecasts in diffusion models. The method can generate temporally consistent ensemble trajectories completely in parallel, with no autoregressive steps. Continuous Ensemble Forecasting can also be combined with autoregressive rollouts to yield forecasts at an arbitrary fine temporal resolution without sacrificing accuracy. We demonstrate that the method achieves competitive results for global weather forecasting with good probabilistic properties.<|reference_end|>
arxiv
@article{andrae2024continuous, title={Continuous Ensemble Weather Forecasting with Diffusion models}, author={Martin Andrae, Tomas Landelius, Joel Oskarsson, Fredrik Lindsten}, journal={arXiv preprint arXiv:2410.05431}, year={2024}, archivePrefix={arXiv}, eprint={2410.05431}, primaryClass={cs.LG physics.ao-ph} }
andrae2024continuous
arxiv-666730
2410.05432
Modeling Buffer Occupancy in bittide Systems
<|reference_start|>Modeling Buffer Occupancy in bittide Systems: The bittide mechanism enables logically synchronous computation across distributed systems by leveraging the continuous frame transmission inherent to wired networks such as Ethernet. Instead of relying on a global clock, bittide uses a decentralized control system to adjust local clock frequencies, ensuring all nodes operate with a consistent notion of time by utilizing elastic buffers at each node to absorb frequency variations. This paper presents an analysis of the steady-state occupancy of these elastic buffers, a critical factor influencing system latency. Using a fluid model of the bittide system, we prove that buffer occupancy converges and derive an explicit formula for the steady-state value in terms of system parameters, including network topology, physical latencies, and controller gains. This analysis provides valuable insights for optimizing buffer sizes and minimizing latency in bittide-based distributed systems.<|reference_end|>
arxiv
@article{lall2024modeling, title={Modeling Buffer Occupancy in bittide Systems}, author={Sanjay Lall and Tammo Spalink}, journal={arXiv preprint arXiv:2410.05432}, year={2024}, archivePrefix={arXiv}, eprint={2410.05432}, primaryClass={eess.SY cs.SY} }
lall2024modeling
arxiv-666731
2410.05433
2FAST-2LAMAA: A Lidar-Inertial Localisation and Mapping Framework for Non-Static Environments
<|reference_start|>2FAST-2LAMAA: A Lidar-Inertial Localisation and Mapping Framework for Non-Static Environments: This document presents a framework for lidar-inertial localisation and mapping named 2Fast-2Lamaa. The method revolves around two main steps which are the inertial-aided undistortion of the lidar data and the scan-to-map registration using a distance-field representation of the environment. The initialisation-free undistortion uses inertial data to constrain the continuous trajectory of the sensor during the lidar scan. The eleven DoFs that fully characterise the trajectory are estimated by minimising lidar point-to-line and point-to-plane distances in a non-linear least-square formulation. The registration uses a map that provides a distance field for the environment based on Gaussian Process regression. The pose of an undistorted lidar scan is optimised to minimise the distance field queries of its points with respect to the map. After registration, the new geometric information is efficiently integrated into the map. The soundness of 2Fast-2Lamaa is demonstrated over several datasets (qualitative evaluation only). The real-time implementation is made publicly available at https://github.com/UTS-RI/2fast2lamaa.<|reference_end|>
arxiv
@article{gentil20242fast-2lamaa:, title={2FAST-2LAMAA: A Lidar-Inertial Localisation and Mapping Framework for Non-Static Environments}, author={Cedric Le Gentil, Raphael Falque, Teresa Vidal-Calleja}, journal={arXiv preprint arXiv:2410.05433}, year={2024}, archivePrefix={arXiv}, eprint={2410.05433}, primaryClass={cs.RO} }
gentil20242fast-2lamaa:
arxiv-666732
2410.05434
Better than Your Teacher: LLM Agents that learn from Privileged AI Feedback
<|reference_start|>Better than Your Teacher: LLM Agents that learn from Privileged AI Feedback: While large language models (LLMs) show impressive decision-making abilities, current methods lack a mechanism for automatic self-improvement from errors during task execution. We propose LEAP, an iterative fine-tuning framework that continually improves LLM agents using feedback from AI expert teachers. Our key insight is to equip the expert teachers with a privileged state -- information that is available during training but hidden at test time. This allows even weak experts to provide precise guidance, significantly improving the student agent's performance without access to privileged information at test time. We evaluate LEAP on diverse decision-making benchmarks, including text-based games (ALFWorld), web navigation (WebShop), and interactive coding (Intercode Bash). Our experiments show that LEAP (1) outperforms behavior cloning and ReAct baselines (2) enables weak student models (e.g., Llama3-8B) to exceed the performance of strong teacher models (GPT4-o), and (3) allows weak models to self-improve using privileged versions of themselves. We also provide a theoretical analysis showing that LEAP's success hinges on balancing privileged information with the student's realizability, which we empirically validate. Our code is available at https://leap-llm.github.io<|reference_end|>
arxiv
@article{choudhury2024better, title={Better than Your Teacher: LLM Agents that learn from Privileged AI Feedback}, author={Sanjiban Choudhury, Paloma Sodhi}, journal={arXiv preprint arXiv:2410.05434}, year={2024}, archivePrefix={arXiv}, eprint={2410.05434}, primaryClass={cs.LG cs.AI} }
choudhury2024better
arxiv-666733
2410.05435
Salient Store: Enabling Smart Storage for Continuous Learning Edge Servers
<|reference_start|>Salient Store: Enabling Smart Storage for Continuous Learning Edge Servers: As continuous-learning-based video analytics continues to evolve, the role of efficient edge servers in managing vast and dynamic datasets is becoming increasingly crucial. Unlike their compute architecture, the storage and archival systems of these edge servers have often been under-emphasized. This is unfortunate, as they contribute significantly to data management and data movement, especially in an emerging compute landscape where data storage and data protection have become key concerns. To mitigate this, we propose Salient Store, which specifically focuses on the integration of Computational Storage Devices (CSDs) into edge servers to enhance data processing and management, particularly in continuous learning scenarios prevalent in fields such as autonomous driving and urban mobility. Our research goes beyond the compute domain and identifies the gaps in current storage system designs. We propose a framework that aligns more closely with the growing data demands. We present a detailed analysis of data movement challenges within archival workflows and demonstrate how the strategic integration of CSDs can significantly optimize data compression, encryption, and other data management tasks to improve overall system performance. By leveraging the parallel processing capabilities of FPGAs and the high internal bandwidth of SSDs, Salient Store reduces communication latency and data volume by ~6.2x and ~6.1x, respectively. This paper provides a comprehensive overview of the potential of CSDs to revolutionize storage, making them not just data repositories but active participants in the computational process.<|reference_end|>
arxiv
@article{mishra2024salient, title={Salient Store: Enabling Smart Storage for Continuous Learning Edge Servers}, author={Cyan Subhra Mishra, Deeksha Chaudhary, Jack Sampson, Mahmut Taylan Kandemir, Chita Das}, journal={arXiv preprint arXiv:2410.05435}, year={2024}, archivePrefix={arXiv}, eprint={2410.05435}, primaryClass={cs.AR} }
mishra2024salient
arxiv-666734
2410.05436
Discovering distinctive elements of biomedical datasets for high-performance exploration
<|reference_start|>Discovering distinctive elements of biomedical datasets for high-performance exploration: The human brain represents an object by small elements and distinguishes two objects based on the difference in elements. Discovering the distinctive elements of high-dimensional datasets is therefore critical in numerous perception-driven biomedical and clinical studies. However, currently there is no available method for reliable extraction of distinctive elements of high-dimensional biomedical and clinical datasets. Here we present an unsupervised deep learning technique namely distinctive element analysis (DEA), which extracts the distinctive data elements using high-dimensional correlative information of the datasets. DEA at first computes a large number of distinctive parts of the data, then filters and condenses the parts into DEA elements by employing a unique kernel-driven triple-optimization network. DEA has been found to improve the accuracy by up to 45% in comparison to the traditional techniques in applications such as disease detection from medical images, gene ranking and cell recognition from single cell RNA sequence (scRNA-seq) datasets. Moreover, DEA allows user-guided manipulation of the intermediate calculation process and thus offers intermediate results with better interpretability.<|reference_end|>
arxiv
@article{islam2024discovering, title={Discovering distinctive elements of biomedical datasets for high-performance exploration}, author={Md Tauhidul Islam and Lei Xing}, journal={arXiv preprint arXiv:2410.05436}, year={2024}, archivePrefix={arXiv}, eprint={2410.05436}, primaryClass={cs.CV} }
islam2024discovering
arxiv-666735
2410.05437
ESPACE: Dimensionality Reduction of Activations for Model Compression
<|reference_start|>ESPACE: Dimensionality Reduction of Activations for Model Compression: We propose ESPACE, an LLM compression technique based on dimensionality reduction of activations. Unlike prior works on weight-centric tensor decomposition, ESPACE projects activations onto a pre-calibrated set of principal components. The activation-centrality of the approach enables retraining LLMs with no loss of expressivity, while at inference, weight decomposition is obtained as a byproduct of matrix multiplication associativity. Theoretical results on the construction of projection matrices with optimal computational accuracy are provided. Experimentally, we find ESPACE enables 50% compression of GPT3, Llama2, and Nemotron4 models with small accuracy degradation, as low as a 0.18 perplexity increase on GPT3-22B. At lower compression rates of 20% to 40%, ESPACE drives GPT3 models to outperform their baseline, with up to a 0.38 decrease in perplexity for GPT3-8B. ESPACE also reduces GEMM execution time and prefill inference latency on existing hardware. Comparison with related works on compressing Llama2-7B via matrix factorization shows that ESPACE is a first step in advancing the state-of-the-art in tensor decomposition compression of LLMs.<|reference_end|>
arxiv
@article{sakr2024espace:, title={ESPACE: Dimensionality Reduction of Activations for Model Compression}, author={Charbel Sakr and Brucek Khailany}, journal={arXiv preprint arXiv:2410.05437}, year={2024}, archivePrefix={arXiv}, eprint={2410.05437}, primaryClass={cs.LG} }
sakr2024espace:
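Illustrative sketch for the ESPACE record above: a minimal, hedged example of activation-centric low-rank projection, where principal components are calibrated from sample activations and the weight decomposition falls out of matrix-multiplication associativity. The shapes, names, and the use of a plain SVD are assumptions for illustration, not the authors' implementation.

```python
import numpy as np

# Minimal sketch (assumptions throughout): project activations onto a
# pre-calibrated orthonormal basis P, then fold the projection into the
# weights so that W x ~= (W P)(P^T x).

def calibrate_projection(acts, rank):
    """acts: (num_samples, d) calibration activations.
    Returns P, a (d, rank) basis of top principal directions."""
    _, _, vt = np.linalg.svd(acts, full_matrices=False)
    return vt[:rank].T                      # (d, rank)

def compress_layer(W, P):
    """Precompute W P offline; at inference only two skinny GEMMs remain."""
    return W @ P                            # (out_dim, rank)

rng = np.random.default_rng(0)
d, out_dim, n_calib, rank = 256, 512, 2048, 64
W = rng.standard_normal((out_dim, d))
calib_acts = (rng.standard_normal((n_calib, d)) @ rng.standard_normal((d, d))) * 0.1

P = calibrate_projection(calib_acts, rank)
WP = compress_layer(W, P)

x = calib_acts[0]
y_full = W @ x
y_low = WP @ (P.T @ x)
print("relative error:", np.linalg.norm(y_full - y_low) / np.linalg.norm(y_full))
```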
arxiv-666736
2410.05438
DAAL: Density-Aware Adaptive Line Margin Loss for Multi-Modal Deep Metric Learning
<|reference_start|>DAAL: Density-Aware Adaptive Line Margin Loss for Multi-Modal Deep Metric Learning: Multi-modal deep metric learning is crucial for effectively capturing diverse representations in tasks such as face verification, fine-grained object recognition, and product search. Traditional approaches to metric learning, whether based on distance or margin metrics, primarily emphasize class separation, often overlooking the intra-class distribution essential for multi-modal feature learning. In this context, we propose a novel loss function called Density-Aware Adaptive Line Margin Loss (DAAL), which preserves the density distribution of embeddings while encouraging the formation of adaptive sub-clusters within each class. By employing an adaptive line strategy, DAAL not only enhances intra-class variance but also ensures robust inter-class separation, facilitating effective multi-modal representation. Comprehensive experiments on benchmark fine-grained datasets demonstrate the superior performance of DAAL, underscoring its potential in advancing retrieval applications and multi-modal deep metric learning.<|reference_end|>
arxiv
@article{gebrerufael2024daal:, title={DAAL: Density-Aware Adaptive Line Margin Loss for Multi-Modal Deep Metric Learning}, author={Hadush Hailu Gebrerufael, Anil Kumar Tiwari, Gaurav Neupane, Goitom Ybrah Hailu}, journal={arXiv preprint arXiv:2410.05438}, year={2024}, archivePrefix={arXiv}, eprint={2410.05438}, primaryClass={cs.CV cs.LG} }
gebrerufael2024daal:
arxiv-666737
2410.05439
Barycentric interpolation formulas for the sphere and the disk
<|reference_start|>Barycentric interpolation formulas for the sphere and the disk: Spherical and polar geometries arise in many important areas of computational science, including weather and climate forecasting, optics, and astrophysics. In these applications, tensor-product grids are often used to represent unknowns. However, interpolation schemes that exploit the tensor-product structure can introduce artificial boundaries at the poles in spherical coordinates and at the origin in polar coordinates, leading to numerical challenges, especially for high-order methods. In this paper, we present new bivariate trigonometric barycentric interpolation formulas for spheres and bivariate trigonometric/polynomial barycentric formulas for disks, designed to overcome these issues. These formulas are also efficient, as they only rely on a set of (precomputed) weights that depend on the grid structure and not the data itself. The formulas are based on the Double Fourier Sphere (DFS) method, which transforms the sphere into a doubly periodic domain and the disk into a domain without an artificial boundary at the origin. For standard tensor-product grids, the proposed formulas exhibit exponential convergence when approximating smooth functions. We provide numerical results to demonstrate these convergence rates and showcase an application of the spherical barycentric formulas in a semi-Lagrangian advection scheme for solving the tracer transport equation on the sphere.<|reference_end|>
arxiv
@article{chiwere2024barycentric, title={Barycentric interpolation formulas for the sphere and the disk}, author={Michael Chiwere and Grady B. Wright}, journal={arXiv preprint arXiv:2410.05439}, year={2024}, archivePrefix={arXiv}, eprint={2410.05439}, primaryClass={math.NA cs.NA} }
chiwere2024barycentric
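Illustrative sketch for the barycentric interpolation record above: the paper derives trigonometric/polynomial barycentric formulas for the sphere and the disk, which are not reproduced here; the snippet below only shows the standard 1D second barycentric form at Chebyshev points of the second kind, the classical building block such formulas generalize. Node and weight choices are the textbook ones, not the paper's.

```python
import numpy as np

# Standard (1D) second barycentric form at Chebyshev points of the second
# kind: p(x) = sum_j w_j f_j/(x - x_j) / sum_j w_j/(x - x_j).

def cheb_points_and_weights(n):
    x = np.cos(np.pi * np.arange(n + 1) / n)       # Chebyshev points on [-1, 1]
    w = (-1.0) ** np.arange(n + 1)
    w[0] *= 0.5
    w[-1] *= 0.5
    return x, w

def bary_eval(xq, x, w, f):
    """Evaluate the barycentric interpolant of data f at query points xq."""
    diff = xq[:, None] - x[None, :]
    exact = np.isclose(diff, 0.0)
    diff[exact] = 1.0                               # avoid division by zero
    terms = w / diff
    p = (terms @ f) / terms.sum(axis=1)
    rows, cols = np.where(exact)                    # at nodes, return data exactly
    p[rows] = f[cols]
    return p

n = 32
x, w = cheb_points_and_weights(n)
f = np.exp(np.sin(3 * x))
xq = np.linspace(-1, 1, 1001)
err = np.max(np.abs(bary_eval(xq, x, w, f) - np.exp(np.sin(3 * xq))))
print("max interpolation error:", err)
```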
arxiv-666738
2410.05440
Can LLMs Understand Time Series Anomalies?
<|reference_start|>Can LLMs Understand Time Series Anomalies?: Large Language Models (LLMs) have gained popularity in time series forecasting, but their potential for anomaly detection remains largely unexplored. Our study investigates whether LLMs can understand and detect anomalies in time series data, focusing on zero-shot and few-shot scenarios. Inspired by conjectures about LLMs' behavior from time series forecasting research, we formulate key hypotheses about LLMs' capabilities in time series anomaly detection. We design and conduct principled experiments to test each of these hypotheses. Our investigation reveals several surprising findings about LLMs for time series: 1. LLMs understand time series better as *images* than as text; 2. LLMs did not demonstrate enhanced performance when prompted to engage in *explicit reasoning* about time series analysis; 3. Contrary to common beliefs, LLMs' understanding of time series *does not* stem from their repetition biases or arithmetic abilities; 4. LLMs' behaviors and performance in time series analysis *vary significantly* across different model architectures. This study provides the first comprehensive analysis of contemporary LLM capabilities in time series anomaly detection. Our results suggest that while LLMs can understand time series anomalies, many common conjectures based on their reasoning capabilities do not hold. These insights pave the way for more effective LLM-based approaches in time series analysis, bridging the gap between forecasting and anomaly detection applications.<|reference_end|>
arxiv
@article{zhou2024can, title={Can LLMs Understand Time Series Anomalies?}, author={Zihao Zhou, Rose Yu}, journal={arXiv preprint arXiv:2410.05440}, year={2024}, archivePrefix={arXiv}, eprint={2410.05440}, primaryClass={cs.LG} }
zhou2024can
arxiv-666739
2410.05441
Thompson Sampling For Combinatorial Bandits: Polynomial Regret and Mismatched Sampling Paradox
<|reference_start|>Thompson Sampling For Combinatorial Bandits: Polynomial Regret and Mismatched Sampling Paradox: We consider Thompson Sampling (TS) for linear combinatorial semi-bandits and subgaussian rewards. We propose the first known TS whose finite-time regret does not scale exponentially with the dimension of the problem. We further show the "mismatched sampling paradox": A learner who knows the rewards distributions and samples from the correct posterior distribution can perform exponentially worse than a learner who does not know the rewards and simply samples from a well-chosen Gaussian posterior. The code used to generate the experiments is available at https://github.com/RaymZhang/CTS-Mismatched-Paradox<|reference_end|>
arxiv
@article{zhang2024thompson, title={Thompson Sampling For Combinatorial Bandits: Polynomial Regret and Mismatched Sampling Paradox}, author={Raymond Zhang and Richard Combes}, journal={arXiv preprint arXiv:2410.05441}, year={2024}, archivePrefix={arXiv}, eprint={2410.05441}, primaryClass={stat.ML cs.LG} }
zhang2024thompson
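Illustrative sketch for the Thompson Sampling record above: a hedged toy of TS for a linear combinatorial semi-bandit where the action is "pick m of d base arms", a Gaussian posterior is kept per base arm, and semi-bandit feedback is observed. This only illustrates the setting studied in the paper; it is not the authors' algorithm, prior choice, or the mismatched-sampling experiment.

```python
import numpy as np

# Toy Thompson Sampling for a top-m combinatorial semi-bandit (assumptions:
# Gaussian posterior per base arm, unit-variance prior, top-m oracle).

rng = np.random.default_rng(1)
d, m, T = 20, 5, 5000
theta = rng.uniform(0.0, 1.0, d)            # unknown mean reward per base arm

mu = np.zeros(d)                            # posterior means
counts = np.zeros(d)                        # observation counts
regret = 0.0
best_value = np.sort(theta)[-m:].sum()

for t in range(T):
    sigma = 1.0 / np.sqrt(counts + 1.0)     # posterior standard deviations
    sample = rng.normal(mu, sigma)          # posterior sample of the means
    action = np.argsort(sample)[-m:]        # oracle: top-m arms under the sample
    rewards = theta[action] + rng.normal(0.0, 0.5, m)   # semi-bandit feedback
    counts[action] += 1
    mu[action] += (rewards - mu[action]) / counts[action]   # running means
    regret += best_value - theta[action].sum()

print("total regret after", T, "rounds:", round(regret, 2))
```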
arxiv-666740
2410.05443
A Deep Learning-Based Approach for Mangrove Monitoring
<|reference_start|>A Deep Learning-Based Approach for Mangrove Monitoring: Mangroves are dynamic coastal ecosystems that are crucial to environmental health, economic stability, and climate resilience. The monitoring and preservation of mangroves are of global importance, with remote sensing technologies playing a pivotal role in these efforts. The integration of cutting-edge artificial intelligence with satellite data opens new avenues for ecological monitoring, potentially revolutionizing conservation strategies at a time when the protection of natural resources is more crucial than ever. The objective of this work is to provide a comprehensive evaluation of recent deep-learning models on the task of mangrove segmentation. We first introduce and make available a novel open-source dataset, MagSet-2, incorporating mangrove annotations from the Global Mangrove Watch and satellite images from Sentinel-2, from mangrove positions all over the world. We then benchmark three architectural groups, namely convolutional, transformer, and mamba models, using the created dataset. The experimental outcomes further validate the deep learning community's interest in the Mamba model, which surpasses other architectures in all metrics.<|reference_end|>
arxiv
@article{de souza2024a, title={A Deep Learning-Based Approach for Mangrove Monitoring}, author={Lucas José Velôso de Souza, Ingrid Valverde Reis Zreik, Adrien Salem-Sermanet, Nacéra Seghouani, Lionel Pourchier}, journal={arXiv preprint arXiv:2410.05443}, year={2024}, archivePrefix={arXiv}, eprint={2410.05443}, primaryClass={cs.CV eess.IV} }
de souza2024a
arxiv-666741
2410.05444
Online scalable Gaussian processes with conformal prediction for guaranteed coverage
<|reference_start|>Online scalable Gaussian processes with conformal prediction for guaranteed coverage: The Gaussian process (GP) is a Bayesian nonparametric paradigm that is widely adopted for uncertainty quantification (UQ) in a number of safety-critical applications, including robotics, healthcare, as well as surveillance. The consistency of the resulting uncertainty values, however, hinges on the premise that the learning function conforms to the properties specified by the GP model, such as smoothness, periodicity and more, which may not be satisfied in practice, especially with data arriving on the fly. To combat such model mis-specification, we propose to wed the GP with the prevailing conformal prediction (CP), a distribution-free post-processing framework that produces prediction sets with provably valid coverage under the sole assumption of data exchangeability. However, this assumption is usually violated in the online setting, where a prediction set is sought before revealing the true label. To ensure a long-term coverage guarantee, we adaptively set the key threshold parameter based on feedback on whether the true label falls inside the prediction set. Numerical results demonstrate the merits of the online GP-CP approach relative to existing alternatives in long-term coverage performance.<|reference_end|>
arxiv
@article{xu2024online, title={Online scalable Gaussian processes with conformal prediction for guaranteed coverage}, author={Jinwen Xu, Qin Lu and Georgios B. Giannakis}, journal={arXiv preprint arXiv:2410.05444}, year={2024}, archivePrefix={arXiv}, eprint={2410.05444}, primaryClass={cs.LG stat.ME stat.ML} }
xu2024online
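Illustrative sketch for the online GP-CP record above: a hedged toy of the feedback idea described in the abstract, where the prediction-set threshold is adapted online according to whether the true label landed inside the set. The base predictor here is a trivial running mean (not a GP) and the update is a simple quantile-tracking step; both are assumptions made only to keep the example self-contained.

```python
import numpy as np

# Toy online coverage control (assumptions: running-mean predictor,
# additive quantile-tracking update of the interval half-width).

rng = np.random.default_rng(2)
T, target_cov, lr = 5000, 0.9, 0.05

mean, n_seen = 0.0, 0
radius = 1.0                                 # current half-width of the set
covered = 0.0

for t in range(T):
    y = np.sin(0.01 * t) + rng.normal(0.0, 0.3)     # streaming observation
    lo, hi = mean - radius, mean + radius           # set built before seeing y
    err = 0.0 if lo <= y <= hi else 1.0
    covered += 1.0 - err
    # Feedback: grow the set after a miss, shrink it slightly after a hit,
    # so the long-run miscoverage rate tracks 1 - target_cov.
    radius = max(radius + lr * (err - (1 - target_cov)), 1e-3)
    n_seen += 1
    mean += (y - mean) / n_seen                     # update the toy predictor

print("empirical coverage:", covered / T)
```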
arxiv-666742
2410.05445
Data-Driven Discovery of Conservation Laws from Trajectories via Neural Deflation
<|reference_start|>Data-Driven Discovery of Conservation Laws from Trajectories via Neural Deflation: In an earlier work by a subset of the present authors, the method of so-called neural deflation was introduced towards identifying a complete set of functionally independent conservation laws of a nonlinear dynamical system. Here, we extend this proposal by a significant step. Instead of using explicit knowledge of the underlying equations of motion, we develop the method directly from system trajectories. This is crucial for enhancing the practical implementation of the method in scenarios where only data reflecting discrete snapshots of the system are available. We showcase the results of the method and the number of associated conservation laws obtained in a diverse range of examples including 1D and 2D harmonic oscillators, the Toda lattice, the Fermi-Pasta-Ulam-Tsingou lattice and the Calogero-Moser system.<|reference_end|>
arxiv
@article{chen2024data-driven, title={Data-Driven Discovery of Conservation Laws from Trajectories via Neural Deflation}, author={Shaoxuan Chen, Panayotis G. Kevrekidis, Hong-Kun Zhang, Wei Zhu}, journal={arXiv preprint arXiv:2410.05445}, year={2024}, archivePrefix={arXiv}, eprint={2410.05445}, primaryClass={nlin.PS cs.LG} }
chen2024data-driven
arxiv-666743
2410.05446
Stability of sorting based embeddings
<|reference_start|>Stability of sorting based embeddings: Consider a group $G$ of order $M$ acting unitarily on a real inner product space $V$. We show that the sorting based embedding obtained by applying a general linear map $\alpha : \mathbb{R}^{M \times N} \to \mathbb{R}^D$ to the invariant map $\beta_\Phi : V \to \mathbb{R}^{M \times N}$ given by sorting the coorbits $(\langle v, g \phi_i \rangle_V)_{g \in G}$, where $(\phi_i)_{i=1}^N \in V$, satisfies a bi-Lipschitz condition if and only if it separates orbits. Additionally, we note that any invariant Lipschitz continuous map (into a Hilbert space) factors through the sorting based embedding, and that any invariant continuous map (into a locally convex space) factors through the sorting based embedding as well.<|reference_end|>
arxiv
@article{balan2024stability, title={Stability of sorting based embeddings}, author={Radu Balan, Efstratios Tsoukanis and Matthias Wellershoff}, journal={arXiv preprint arXiv:2410.05446}, year={2024}, archivePrefix={arXiv}, eprint={2410.05446}, primaryClass={math.FA cs.LG} }
balan2024stability
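Illustrative sketch for the sorting-based embeddings record above: it mirrors the construction the abstract describes, namely sorting the coorbits ⟨v, gφ_i⟩ over a finite group and then applying a linear map α, and checks invariance under the group action. The concrete choices (cyclic shift group acting on R^d, a random α) are assumptions for illustration only.

```python
import numpy as np

# Sorted-coorbit embedding for the cyclic shift group acting on R^d
# (a unitary action, since shifts are permutations).

rng = np.random.default_rng(3)
d = 8                                        # dimension of V; group order M = d
N, D = 4, 16                                 # number of templates, output dimension

templates = rng.standard_normal((N, d))      # phi_1, ..., phi_N
A = rng.standard_normal((D, d * N))          # linear map alpha on R^{M x N}

def coorbit_embedding(v):
    cols = []
    for phi in templates:
        # Inner products of v with every cyclic shift of phi, then sorted.
        coorbits = np.array([v @ np.roll(phi, k) for k in range(d)])
        cols.append(np.sort(coorbits))
    return A @ np.concatenate(cols)          # alpha applied to sorted coorbits

v = rng.standard_normal(d)
g_v = np.roll(v, 3)                          # a group element applied to v
print(np.allclose(coorbit_embedding(v), coorbit_embedding(g_v)))   # True: invariant
```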
arxiv-666744
2410.05447
Propeller damage detection, classification and estimation in multirotor vehicles
<|reference_start|>Propeller damage detection, classification and estimation in multirotor vehicles: This manuscript details an architecture and training methodology for a data-driven framework aimed at detecting, identifying, and quantifying damage in the propeller blades of multirotor Unmanned Aerial Vehicles. By substituting one propeller with a damaged counterpart, encompassing three distinct damage types of varying severity, real flight data was collected. This data was then used to train a composite model, comprising both classifiers and neural networks, capable of accurately identifying the type of failure, estimating damage severity, and pinpointing the affected rotor. The data employed for this analysis was exclusively sourced from inertial measurements and control command inputs, ensuring adaptability across diverse multirotor vehicle platforms.<|reference_end|>
arxiv
@article{pose2024propeller, title={Propeller damage detection, classification and estimation in multirotor vehicles}, author={Claudio Pose, Juan Giribet, Gabriel Torre}, journal={arXiv preprint arXiv:2410.05447}, year={2024}, archivePrefix={arXiv}, eprint={2410.05447}, primaryClass={cs.RO cs.SY eess.SY} }
pose2024propeller
arxiv-666745
2410.05448
Task Diversity Shortens the ICL Plateau
<|reference_start|>Task Diversity Shortens the ICL Plateau: In-context learning (ICL) describes a language model's ability to generate outputs based on a set of input demonstrations and a subsequent query. To understand this remarkable capability, researchers have studied simplified, stylized models. These studies have consistently observed long loss plateaus, during which models exhibit minimal improvement, followed by a sudden, rapid surge of learning. In this work, we reveal that training on multiple diverse ICL tasks simultaneously shortens the loss plateaus, making each task easier to learn. This finding is surprising as it contradicts the natural intuition that the combined complexity of multiple ICL tasks would lengthen the learning process, not shorten it. Our result suggests that the recent success in large-scale training of language models may be attributed not only to the richness of the data at scale but also to the easier optimization (training) induced by the diversity of natural language training data.<|reference_end|>
arxiv
@article{kim2024task, title={Task Diversity Shortens the ICL Plateau}, author={Jaeyeon Kim, Sehyun Kwon, Joo Young Choi, Jongho Park, Jaewoong Cho, Jason D. Lee, Ernest K. Ryu}, journal={arXiv preprint arXiv:2410.05448}, year={2024}, archivePrefix={arXiv}, eprint={2410.05448}, primaryClass={cs.LG cs.CL} }
kim2024task
arxiv-666746
2410.05449
Skin Controlled Electronic and Neuromorphic Tattoos
<|reference_start|>Skin Controlled Electronic and Neuromorphic Tattoos: Wearable human activity sensors developed in the past decade show a distinct trend of becoming thinner and more imperceptible while retaining their electrical qualities, with graphene e-tattoos as the ultimate example. A persistent challenge in modern wearables, however, is signal degradation due to the distance between the sensor's recording site and the signal transmission medium. To address this, we propose here to directly utilize human skin as a signal transmission medium as well as using low-cost gel electrodes for rapid probing of 2D transistor-based wearables. We demonstrate that the hypodermis layer of the skin can effectively serve as an electrolyte, enabling electrical potential application to semiconducting films made from graphene and other 2D materials placed on top of the skin. Graphene transistor tattoos, when biased through the body, exhibit high charge carrier mobility (up to 6500 cm2V-1s-1), with MoS2 and PtSe2 transistors showing mobilities up to 30 cm2V-1s-1 and 1 cm2V-1s-1, respectively. Finally, by introducing a layer of Nafion to the device structure, we observed neuromorphic functionality, transforming these e-tattoos into neuromorphic bioelectronic devices controlled through the skin itself. The neuromorphic bioelectronic tattoos have the potential for developing self-aware and stand-alone smart wearables, crucial for understanding and improving overall human performance.<|reference_end|>
arxiv
@article{kireev2024skin, title={Skin Controlled Electronic and Neuromorphic Tattoos}, author={Dmitry Kireev, Nandu Koripally, Samuel Liu, Gabriella Coloyan Fleming, Philip Varkey, Joseph Belle, Sivasakthya Mohan, Sang Sub Han, Dong Xu, Yeonwoong Jung, Xiangfeng Duan, Jean Anne C. Incorvia, Deji Akinwande}, journal={arXiv preprint arXiv:2410.05449}, year={2024}, archivePrefix={arXiv}, eprint={2410.05449}, primaryClass={cs.HC} }
kireev2024skin
arxiv-666747
2410.05450
AI-Driven Early Mental Health Screening with Limited Data: Analyzing Selfies of Pregnant Women
<|reference_start|>AI-Driven Early Mental Health Screening with Limited Data: Analyzing Selfies of Pregnant Women: Major Depressive Disorder and anxiety disorders affect millions globally, contributing significantly to the burden of mental health issues. Early screening is crucial for effective intervention, as timely identification of mental health issues can significantly improve treatment outcomes. Artificial intelligence (AI) can be valuable for improving the screening of mental disorders, enabling early intervention and better treatment outcomes. AI-driven screening can leverage the analysis of multiple data sources, including facial features in digital images. However, existing methods often rely on controlled environments or specialized equipment, limiting their broad applicability. This study explores the potential of AI models for ubiquitous depression-anxiety screening given face-centric selfies. The investigation focuses on high-risk pregnant patients, a population that is particularly vulnerable to mental health issues. To cope with limited training data resulting from our clinical setup, pre-trained models were utilized in two different approaches: fine-tuning convolutional neural networks (CNNs) originally designed for facial expression recognition and employing vision-language models (VLMs) for zero-shot analysis of facial expressions. Experimental results indicate that the proposed VLM-based method significantly outperforms CNNs, achieving an accuracy of 77.6% and an F1-score of 56.0%. Although there is significant room for improvement, the results suggest that VLMs can be a promising approach for mental health screening, especially in scenarios with limited data.<|reference_end|>
arxiv
@article{basílio2024ai-driven, title={AI-Driven Early Mental Health Screening with Limited Data: Analyzing Selfies of Pregnant Women}, author={Gustavo A. Basílio, Thiago B. Pereira, Alessandro L. Koerich, Ludmila Dias, Maria das Graças da S. Teixeira, Rafael T. Sousa, Wilian H. Hisatugu, Amanda S. Mota, Anilton S. Garcia, Marco Aurélio K. Galletta, Hermano Tavares, Thiago M. Paixão}, journal={arXiv preprint arXiv:2410.05450}, year={2024}, archivePrefix={arXiv}, eprint={2410.05450}, primaryClass={cs.CV cs.AI cs.LG} }
basílio2024ai-driven
arxiv-666748
2410.05451
Aligning LLMs to Be Robust Against Prompt Injection
<|reference_start|>Aligning LLMs to Be Robust Against Prompt Injection: Large language models (LLMs) are becoming increasingly prevalent in modern software systems, interfacing between the user and the internet to assist with tasks that require advanced language understanding. To accomplish these tasks, the LLM often uses external data sources such as user documents, web retrieval, results from API calls, etc. This opens up new avenues for attackers to manipulate the LLM via prompt injection. Adversarial prompts can be carefully crafted and injected into external data sources to override the user's intended instruction and instead execute a malicious instruction. Prompt injection attacks constitute a major threat to LLM security, making the design and implementation of practical countermeasures of paramount importance. To this end, we show that alignment can be a powerful tool to make LLMs more robust against prompt injection. Our method -- SecAlign -- first builds an alignment dataset by simulating prompt injection attacks and constructing pairs of desirable and undesirable responses. Then, we apply existing alignment techniques to fine-tune the LLM to be robust against these simulated attacks. Our experiments show that SecAlign robustifies the LLM substantially with a negligible hurt on model utility. Moreover, SecAlign's protection generalizes to strong attacks unseen in training. Specifically, the success rate of state-of-the-art GCG-based prompt injections drops from 56% to 2% in Mistral-7B after our alignment process. Our code is released at https://github.com/facebookresearch/SecAlign<|reference_end|>
arxiv
@article{chen2024aligning, title={Aligning LLMs to Be Robust Against Prompt Injection}, author={Sizhe Chen, Arman Zharmagambetov, Saeed Mahloujifar, Kamalika Chaudhuri, Chuan Guo}, journal={arXiv preprint arXiv:2410.05451}, year={2024}, archivePrefix={arXiv}, eprint={2410.05451}, primaryClass={cs.CR cs.LG} }
chen2024aligning
arxiv-666749
2410.05452
Automatic Identification and Visualization of Group Training Activities Using Wearable Data
<|reference_start|>Automatic Identification and Visualization of Group Training Activities Using Wearable Data: Human Activity Recognition (HAR) identifies daily activities from time-series data collected by wearable devices like smartwatches. Recent advancements in Internet of Things (IoT), cloud computing, and low-cost sensors have broadened HAR applications across fields like healthcare, biometrics, sports, and personal fitness. However, challenges remain in efficiently processing the vast amounts of data generated by these devices and developing models that can accurately recognize a wide range of activities from continuous recordings, without relying on predefined activity training sessions. This paper presents a comprehensive framework for imputing, analyzing, and identifying activities from wearable data, specifically targeting group training scenarios without explicit activity sessions. Our approach is based on data collected from 135 soldiers wearing Garmin 55 smartwatches over six months. The framework integrates multiple data streams, handles missing data through cross-domain statistical methods, and identifies activities with high accuracy using machine learning (ML). Additionally, we utilized statistical analysis techniques to evaluate the performance of each individual within the group, providing valuable insights into their respective positions in the group in an easy-to-understand visualization. These visualizations facilitate easy understanding of performance metrics, enhancing group interactions and informing individualized training programs. We evaluate our framework through traditional train-test splits and out-of-sample scenarios, focusing on the model's generalization capabilities. Additionally, we address sleep data imputation without relying on ML, improving recovery analysis. Our findings demonstrate the potential of wearable data for accurately identifying group activities, paving the way for intelligent, data-driven training solutions.<|reference_end|>
arxiv
@article{gahtan2024automatic, title={Automatic Identification and Visualization of Group Training Activities Using Wearable Data}, author={Barak Gahtan, Shany Funk, Einat Kodesh, Itay Ketko, Tsvi Kuflik, Alex M. Bronstein}, journal={arXiv preprint arXiv:2410.05452}, year={2024}, archivePrefix={arXiv}, eprint={2410.05452}, primaryClass={cs.LG cs.HC} }
gahtan2024automatic
arxiv-666750
2410.05453
Interconnected Kingdoms: Comparing 'A Song of Ice and Fire' Adaptations Across Media Using Complex Networks
<|reference_start|>Interconnected Kingdoms: Comparing 'A Song of Ice and Fire' Adaptations Across Media Using Complex Networks: In this article, we propose and apply a method to compare adaptations of the same story across different media. We tackle this task by modelling such adaptations through character networks. We compare them by leveraging two concepts at the core of storytelling: the characters involved, and the dynamics of the story. We propose several methods to match characters between media and compare their position in the networks; and perform narrative matching, i.e. match the sequences of narrative units that constitute the plots. We apply these methods to the novel series \textit{A Song of Ice and Fire}, by G.R.R. Martin, and its comics and TV show adaptations. Our results show that interactions between characters are not sufficient to properly match individual characters between adaptations, but that using some additional information such as character affiliation or gender significantly improves the performance. On the contrary, character interactions convey enough information to perform narrative matching, and allow us to detect the divergence between the original novels and its TV show adaptation.<|reference_end|>
arxiv
@article{amalvy2024interconnected, title={Interconnected Kingdoms: Comparing 'A Song of Ice and Fire' Adaptations Across Media Using Complex Networks}, author={Arthur Amalvy, Madeleine Janickyj, Shane Mannion, Pádraig MacCarron, Vincent Labatut}, journal={Social Network Analysis and Mining 14, 199 (2024)}, year={2024}, doi={10.1007/s13278-024-01365-z}, archivePrefix={arXiv}, eprint={2410.05453}, primaryClass={cs.SI cs.CL} }
amalvy2024interconnected
arxiv-666751
2410.05454
Meta-Dynamical State Space Models for Integrative Neural Data Analysis
<|reference_start|>Meta-Dynamical State Space Models for Integrative Neural Data Analysis: Learning shared structure across environments facilitates rapid learning and adaptive behavior in neural systems. This has been widely demonstrated and applied in machine learning to train models that are capable of generalizing to novel settings. However, there has been limited work exploiting the shared structure in neural activity during similar tasks for learning latent dynamics from neural recordings. Existing approaches are designed to infer dynamics from a single dataset and cannot be readily adapted to account for statistical heterogeneities across recordings. In this work, we hypothesize that similar tasks admit a corresponding family of related solutions and propose a novel approach for meta-learning this solution space from task-related neural activity of trained animals. Specifically, we capture the variabilities across recordings on a low-dimensional manifold which concisely parametrizes this family of dynamics, thereby facilitating rapid learning of latent dynamics given new recordings. We demonstrate the efficacy of our approach on few-shot reconstruction and forecasting of synthetic dynamical systems, and neural recordings from the motor cortex during different arm reaching tasks.<|reference_end|>
arxiv
@article{vermani2024meta-dynamical, title={Meta-Dynamical State Space Models for Integrative Neural Data Analysis}, author={Ayesha Vermani, Josue Nassar, Hyungju Jeon, Matthew Dowling, Il Memming Park}, journal={arXiv preprint arXiv:2410.05454}, year={2024}, archivePrefix={arXiv}, eprint={2410.05454}, primaryClass={stat.ML cs.LG q-bio.NC} }
vermani2024meta-dynamical
arxiv-666752
2410.05455
Dynamic HumTrans: Humming Transcription Using CNNs and Dynamic Programming
<|reference_start|>Dynamic HumTrans: Humming Transcription Using CNNs and Dynamic Programming: We propose a novel approach for humming transcription that combines a CNN-based architecture with a dynamic programming-based post-processing algorithm, utilizing the recently introduced HumTrans dataset. We identify and address inherent problems with the offset and onset ground truth provided by the dataset, offering heuristics to improve these annotations and resulting in a dataset with precise annotations that will aid future research. Additionally, we compare the transcription accuracy of our method against several others, demonstrating state-of-the-art (SOTA) results. All our code and the corrected dataset are available at https://github.com/shubham-gupta-30/humming_transcription<|reference_end|>
arxiv
@article{gupta2024dynamic, title={Dynamic HumTrans: Humming Transcription Using CNNs and Dynamic Programming}, author={Shubham Gupta and Isaac Neri Gomez-Sarmiento and Faez Amjed Mezdari and Mirco Ravanelli and Cem Subakan}, journal={arXiv preprint arXiv:2410.05455}, year={2024}, archivePrefix={arXiv}, eprint={2410.05455}, primaryClass={cs.LG cs.AI cs.SD eess.AS} }
gupta2024dynamic
arxiv-666753
2410.05458
Testing Credibility of Public and Private Surveys through the Lens of Regression
<|reference_start|>Testing Credibility of Public and Private Surveys through the Lens of Regression: Testing whether a sample survey is a credible representation of the population is an important question to ensure the validity of any downstream research. While this problem, in general, does not have an efficient solution, one might take a task-based approach and aim to understand whether a certain data analysis tool, like linear regression, would yield similar answers both on the population and the sample survey. In this paper, we design an algorithm to test the credibility of a sample survey in terms of linear regression. In other words, we design an algorithm that can certify if a sample survey is good enough to guarantee the correctness of data analysis done using linear regression tools. Nowadays, one is naturally concerned about data privacy in surveys. Thus, we further test the credibility of surveys published in a differentially private manner. Specifically, we focus on Local Differential Privacy (LDP), which is a standard technique to ensure privacy in surveys where the survey participants might not trust the aggregator. We extend our algorithm to work even when the data analysis has been done using surveys with LDP. In the process, we also propose an algorithm that learns with high probability the guarantees a linear regression model on a survey published with LDP. Our algorithm also serves as a mechanism to learn linear regression models from data corrupted with noise coming from any subexponential distribution. We prove that it achieves the optimal estimation error bound for $\ell_1$ linear regression, which might be of broader interest. We prove the theoretical correctness of our algorithms while trying to reduce the sample complexity for both public and private surveys. We also numerically demonstrate the performance of our algorithms on real and synthetic datasets.<|reference_end|>
arxiv
@article{basu2024testing, title={Testing Credibility of Public and Private Surveys through the Lens of Regression}, author={Debabrota Basu, Sourav Chakraborty, Debarshi Chanda, Buddha Dev Das, Arijit Ghosh, Arnab Ray}, journal={arXiv preprint arXiv:2410.05458}, year={2024}, archivePrefix={arXiv}, eprint={2410.05458}, primaryClass={cs.LG cs.CR stat.ME stat.ML} }
basu2024testing
arxiv-666754
2410.05459
From Sparse Dependence to Sparse Attention: Unveiling How Chain-of-Thought Enhances Transformer Sample Efficiency
<|reference_start|>From Sparse Dependence to Sparse Attention: Unveiling How Chain-of-Thought Enhances Transformer Sample Efficiency: Chain-of-thought (CoT) significantly enhances the reasoning performance of large language models (LLM). While current theoretical studies often attribute this improvement to increased expressiveness and computational capacity, we argue that expressiveness is not the primary limitation in the LLM regime, as current large models will fail on simple tasks. Using a parity-learning setup, we demonstrate that CoT can substantially improve sample efficiency even when the representation power is sufficient. Specifically, with CoT, a transformer can learn the function within polynomial samples, whereas without CoT, the required sample size is exponential. Additionally, we show that CoT simplifies the learning process by introducing sparse sequential dependencies among input tokens, and leads to a sparse and interpretable attention. We validate our theoretical analysis with both synthetic and real-world experiments, confirming that sparsity in attention layers is a key factor of the improvement induced by CoT.<|reference_end|>
arxiv
@article{wen2024from, title={From Sparse Dependence to Sparse Attention: Unveiling How Chain-of-Thought Enhances Transformer Sample Efficiency}, author={Kaiyue Wen, Huaqing Zhang, Hongzhou Lin, Jingzhao Zhang}, journal={arXiv preprint arXiv:2410.05459}, year={2024}, archivePrefix={arXiv}, eprint={2410.05459}, primaryClass={cs.LG cs.CL stat.ML} }
wen2024from
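Illustrative sketch for the chain-of-thought record above: a hedged toy of the parity-learning setup the abstract mentions, where inputs are random bit strings, the label is the parity of a hidden sparse subset of coordinates, and the "CoT" variant exposes intermediate running parities as extra supervision. The encoding and tokenization are assumptions; no claim is made about the paper's exact data pipeline.

```python
import numpy as np

# Sparse-parity data with and without chain-of-thought targets (toy sketch).

rng = np.random.default_rng(4)
n_bits, k = 16, 4
support = rng.choice(n_bits, size=k, replace=False)    # hidden relevant bits

def make_example(with_cot):
    x = rng.integers(0, 2, n_bits)
    label = int(x[support].sum() % 2)
    if not with_cot:
        return x.tolist(), [label]
    # Chain of thought: running parity after folding in each relevant bit;
    # the last CoT token is the label itself.
    running, cot = 0, []
    for i in support:
        running ^= int(x[i])
        cot.append(running)
    return x.tolist(), cot

for with_cot in (False, True):
    _, targets = make_example(with_cot)
    print("CoT" if with_cot else "direct", "targets:", targets)
```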
arxiv-666755
2410.05460
It's Not Easy Being Green: On the Energy Efficiency of Programming Languages
<|reference_start|>It's Not Easy Being Green: On the Energy Efficiency of Programming Languages: Does the choice of programming language affect energy consumption? Previous highly visible studies have established associations between certain programming languages and energy consumption. A causal misinterpretation of this work has led academics and industry leaders to use or support certain languages based on their claimed impact on energy consumption. This paper tackles this causal question directly. It first corrects and improves the measurement methodology used by prior work. It then develops a detailed causal model capturing the complex relationship between programming language choice and energy consumption. This model identifies and incorporates several critical but previously overlooked factors that affect energy usage. These factors, such as distinguishing programming languages from their implementations, the impact of the application implementations themselves, the number of active cores, and memory activity, can significantly skew energy consumption measurements if not accounted for. We show -- via empirical experiments, improved methodology, and careful examination of anomalies -- that when these factors are controlled for, notable discrepancies in prior work vanish. Our analysis suggests that the choice of programming language implementation has no significant impact on energy consumption beyond execution time.<|reference_end|>
arxiv
@article{van kempen2024it's, title={It's Not Easy Being Green: On the Energy Efficiency of Programming Languages}, author={Nicolas van Kempen and Hyuk-Je Kwon and Dung Tuan Nguyen and Emery D. Berger}, journal={arXiv preprint arXiv:2410.05460}, year={2024}, archivePrefix={arXiv}, eprint={2410.05460}, primaryClass={cs.PL cs.PF} }
van kempen2024it's
arxiv-666756
2410.05462
LevAttention: Time, Space, and Streaming Efficient Algorithm for Heavy Attentions
<|reference_start|>LevAttention: Time, Space, and Streaming Efficient Algorithm for Heavy Attentions: A central problem related to transformers can be stated as follows: given two $n \times d$ matrices $Q$ and $K$, and a non-negative function $f$, define the matrix $A$ as follows: (1) apply the function $f$ to each entry of the $n \times n$ matrix $Q K^T$, and then (2) normalize each of the row sums of $A$ to be equal to $1$. The matrix $A$ can be computed in $O(n^2 d)$ time assuming $f$ can be applied to a number in constant time, but the quadratic dependence on $n$ is prohibitive in applications where it corresponds to long context lengths. For a large class of functions $f$, we show how to find all the ``large attention scores", i.e., entries of $A$ which are at least a positive value $\varepsilon$, in time with linear dependence on $n$ (i.e., $n \cdot \textrm{poly}(d/\varepsilon)$) for a positive parameter $\varepsilon > 0$. Our class of functions include all functions $f$ of the form $f(x) = |x|^p$, as explored recently in transformer models. Using recently developed tools from randomized numerical linear algebra, we prove that for any $K$, there is a ``universal set" $U \subset [n]$ of size independent of $n$, such that for any $Q$ and any row $i$, the large attention scores $A_{i,j}$ in row $i$ of $A$ all have $j \in U$. We also find $U$ in $n \cdot \textrm{poly}(d/\varepsilon)$ time. Notably, we (1) make no assumptions on the data, (2) our workspace does not grow with $n$, and (3) our algorithms can be computed in streaming and parallel settings. We call the attention mechanism that uses only the subset of keys in the universal set as LevAttention since our algorithm to identify the universal set $U$ is based on leverage scores. We empirically show the benefits of our scheme for vision transformers, showing how to train new models that use our universal set while training as well, showing that our model is able to consistently select ``important keys'' during training.<|reference_end|>
arxiv
@article{kannan2024levattention:, title={LevAttention: Time, Space, and Streaming Efficient Algorithm for Heavy Attentions}, author={Ravindran Kannan, Chiranjib Bhattacharyya, Praneeth Kacham, David P. Woodruff}, journal={arXiv preprint arXiv:2410.05462}, year={2024}, archivePrefix={arXiv}, eprint={2410.05462}, primaryClass={cs.LG cs.DS} }
kannan2024levattention:
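Illustrative sketch for the LevAttention record above: a hedged example of the leverage-score idea, scoring each key row of K by its statistical leverage and keeping only the highest-leverage keys as a candidate "universal set" for attention. The fixed budget, the selection rule, and the restricted-softmax step are assumptions for illustration, not the paper's exact algorithm or guarantees.

```python
import numpy as np

# Row leverage scores of K and a leverage-based key subset (toy sketch).

rng = np.random.default_rng(5)
n, d, budget = 1024, 32, 64

K = rng.standard_normal((n, d))
K[:8] *= 10.0                                # a few keys with outsized influence

# Leverage score of row j: l_j = k_j^T (K^T K)^+ k_j, computed via the thin SVD
# as the squared row norms of the left singular vectors.
U, s, _ = np.linalg.svd(K, full_matrices=False)
leverage = np.sum(U ** 2, axis=1)

universal = np.argsort(leverage)[-budget:]   # keep the top-`budget` keys
print("kept", len(universal), "keys; top scores:", np.round(np.sort(leverage)[-3:], 3))

# Restricted attention for one query q: softmax only over the kept keys.
q = rng.standard_normal(d)
scores = K[universal] @ q / np.sqrt(d)
weights = np.exp(scores - scores.max())
weights /= weights.sum()
print("largest restricted attention weight:", float(weights.max()))
```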
arxiv-666757
2410.05464
Progressive distillation induces an implicit curriculum
<|reference_start|>Progressive distillation induces an implicit curriculum: Knowledge distillation leverages a teacher model to improve the training of a student model. A persistent challenge is that a better teacher does not always yield a better student, to which a common mitigation is to use additional supervision from several ``intermediate'' teachers. One empirically validated variant of this principle is progressive distillation, where the student learns from successive intermediate checkpoints of the teacher. Using sparse parity as a sandbox, we identify an implicit curriculum as one mechanism through which progressive distillation accelerates the student's learning. This curriculum is available only through the intermediate checkpoints but not the final converged one, and imparts both empirical acceleration and a provable sample complexity benefit to the student. We then extend our investigation to Transformers trained on probabilistic context-free grammars (PCFGs) and real-world pre-training datasets (Wikipedia and Books). Through probing the teacher model, we identify an analogous implicit curriculum where the model progressively learns features that capture longer context. Our theoretical and empirical findings on sparse parity, complemented by empirical observations on more complex tasks, highlight the benefit of progressive distillation via implicit curriculum across setups.<|reference_end|>
arxiv
@article{panigrahi2024progressive, title={Progressive distillation induces an implicit curriculum}, author={Abhishek Panigrahi, Bingbin Liu, Sadhika Malladi, Andrej Risteski, Surbhi Goel}, journal={arXiv preprint arXiv:2410.05464}, year={2024}, archivePrefix={arXiv}, eprint={2410.05464}, primaryClass={cs.LG} }
panigrahi2024progressive
arxiv-666758
2410.05465
On the Expressive Power of Tree-Structured Probabilistic Circuits
<|reference_start|>On the Expressive Power of Tree-Structured Probabilistic Circuits: Probabilistic circuits (PCs) have emerged as a powerful framework to compactly represent probability distributions for efficient and exact probabilistic inference. It has been shown that PCs with a general directed acyclic graph (DAG) structure can be understood as a mixture of exponentially (in its height) many components, each of which is a product distribution over univariate marginals. However, existing structure learning algorithms for PCs often generate tree-structured circuits or use tree-structured circuits as intermediate steps to compress them into DAG-structured circuits. This leads to the intriguing question of whether there exists an exponential gap between DAGs and trees for the PC structure. In this paper, we provide a negative answer to this conjecture by proving that, for $n$ variables, there exists a sub-exponential upper bound $n^{O(\log n)}$ on the size of an equivalent tree computing the same probability distribution. On the other hand, we also show that given a depth restriction on the tree, there is a super-polynomial separation between tree and DAG-structured PCs. Our work takes an important step towards understanding the expressive power of tree-structured PCs, and our techniques may be of independent interest in the study of structure learning algorithms for PCs.<|reference_end|>
arxiv
@article{yin2024on, title={On the Expressive Power of Tree-Structured Probabilistic Circuits}, author={Lang Yin, Han Zhao}, journal={arXiv preprint arXiv:2410.05465}, year={2024}, archivePrefix={arXiv}, eprint={2410.05465}, primaryClass={cs.AI cs.LG} }
yin2024on
arxiv-666759
2410.05466
Herd Mentality in Augmentation -- Not a Good Idea! A Robust Multi-stage Approach towards Deepfake Detection
<|reference_start|>Herd Mentality in Augmentation -- Not a Good Idea! A Robust Multi-stage Approach towards Deepfake Detection: The rapid increase in deepfake technology has raised significant concerns about digital media integrity. Detecting deepfakes is crucial for safeguarding digital media. However, most standard image classifiers fail to distinguish between fake and real faces. Our analysis reveals that this failure is due to the model's inability to explicitly focus on the artefacts typically present in deepfakes. We propose an enhanced architecture based on the GenConViT model, which incorporates weighted loss and update augmentation techniques and includes masked eye pretraining. This proposed model improves the F1 score by 1.71% and the accuracy by 4.34% on the Celeb-DF v2 dataset. The source code for our model is available at https://github.com/Monu-Khicher-1/multi-stage-learning<|reference_end|>
arxiv
@article{monu2024herd, title={Herd Mentality in Augmentation -- Not a Good Idea! A Robust Multi-stage Approach towards Deepfake Detection}, author={Monu, Rohan Raju Dhanakshirur}, journal={arXiv preprint arXiv:2410.05466}, year={2024}, archivePrefix={arXiv}, eprint={2410.05466}, primaryClass={cs.CV cs.AI} }
monu2024herd
arxiv-666760
2410.05468
PH-Dropout: Practical Epistemic Uncertainty Quantification for View Synthesis
<|reference_start|>PH-Dropout: Practical Epistemic Uncertainty Quantification for View Synthesis: View synthesis using Neural Radiance Fields (NeRF) and Gaussian Splatting (GS) has demonstrated impressive fidelity in rendering real-world scenarios. However, practical methods for accurate and efficient epistemic Uncertainty Quantification (UQ) in view synthesis are lacking. Existing approaches for NeRF either introduce significant computational overhead (e.g., ``10x increase in training time" or ``10x repeated training") or are limited to specific uncertainty conditions or models. Notably, GS models lack any systematic approach for comprehensive epistemic UQ. This capability is crucial for improving the robustness and scalability of neural view synthesis, enabling active model updates, error estimation, and scalable ensemble modeling based on uncertainty. In this paper, we revisit NeRF and GS-based methods from a function approximation perspective, identifying key differences and connections in 3D representation learning. Building on these insights, we introduce PH-Dropout (Post hoc Dropout), the first real-time and accurate method for epistemic uncertainty estimation that operates directly on pre-trained NeRF and GS models. Extensive evaluations validate our theoretical findings and demonstrate the effectiveness of PH-Dropout.<|reference_end|>
arxiv
@article{sun2024ph-dropout:, title={PH-Dropout: Practical Epistemic Uncertainty Quantification for View Synthesis}, author={Chuanhao Sun, Thanos Triantafyllou, Anthos Makris, Maja Drmač, Kai Xu, Luo Mai, Mahesh K. Marina}, journal={arXiv preprint arXiv:2410.05468}, year={2024}, archivePrefix={arXiv}, eprint={2410.05468}, primaryClass={cs.CV} }
sun2024ph-dropout:
arxiv-666761
2410.05470
Image Watermarks are Removable Using Controllable Regeneration from Clean Noise
<|reference_start|>Image Watermarks are Removable Using Controllable Regeneration from Clean Noise: Image watermark techniques provide an effective way to assert ownership, deter misuse, and trace content sources, which has become increasingly essential in the era of large generative models. A critical attribute of watermark techniques is their robustness against various manipulations. In this paper, we introduce a watermark removal approach capable of effectively nullifying the state of the art watermarking techniques. Our primary insight involves regenerating the watermarked image starting from a clean Gaussian noise via a controllable diffusion model, utilizing the extracted semantic and spatial features from the watermarked image. The semantic control adapter and the spatial control network are specifically trained to control the denoising process towards ensuring image quality and enhancing consistency between the cleaned image and the original watermarked image. To achieve a smooth trade-off between watermark removal performance and image consistency, we further propose an adjustable and controllable regeneration scheme. This scheme adds varying numbers of noise steps to the latent representation of the watermarked image, followed by a controlled denoising process starting from this noisy latent representation. As the number of noise steps increases, the latent representation progressively approaches clean Gaussian noise, facilitating the desired trade-off. We apply our watermark removal methods across various watermarking techniques, and the results demonstrate that our methods offer superior visual consistency/quality and enhanced watermark removal performance compared to existing regeneration approaches.<|reference_end|>
arxiv
@article{liu2024image, title={Image Watermarks are Removable Using Controllable Regeneration from Clean Noise}, author={Yepeng Liu, Yiren Song, Hai Ci, Yu Zhang, Haofan Wang, Mike Zheng Shou, Yuheng Bu}, journal={arXiv preprint arXiv:2410.05470}, year={2024}, archivePrefix={arXiv}, eprint={2410.05470}, primaryClass={cs.CR cs.AI cs.CV} }
liu2024image
arxiv-666762
2410.05471
Exact sensitivity analysis of Markov reward processes via algebraic geometry
<|reference_start|>Exact sensitivity analysis of Markov reward processes via algebraic geometry: We introduce a new approach for deterministic sensitivity analysis of Markov reward processes, commonly used in cost-effectiveness analyses, via reformulation into a polynomial system. Our approach leverages cylindrical algebraic decomposition (CAD), a technique arising from algebraic geometry that provides an exact description of all solutions to a polynomial system. While it is typically intractable to build a CAD for systems with more than a few variables, we show that a special class of polynomial systems, which includes the polynomials arising from Markov reward processes, can be analyzed much more tractably. We establish several theoretical results about such systems and develop a specialized algorithm to construct their CAD, which allows us to perform exact, multi-way sensitivity analysis for common health economic analyses. We develop an open-source software package that implements our algorithm. Finally, we apply it to two case studies, one with synthetic data and one that re-analyzes a previous cost-effectiveness analysis from the literature, demonstrating advantages of our approach over standard techniques. Our software and code are available at: \url{https://github.com/mmaaz-git/markovag}.<|reference_end|>
arxiv
@article{chan2024exact, title={Exact sensitivity analysis of Markov reward processes via algebraic geometry}, author={Timothy C. Y. Chan, Muhammad Maaz}, journal={arXiv preprint arXiv:2410.05471}, year={2024}, archivePrefix={arXiv}, eprint={2410.05471}, primaryClass={math.OC cs.MS math.AG math.PR} }
chan2024exact
arxiv-666763
2410.05472
Neural machine translation system for Lezgian, Russian and Azerbaijani languages
<|reference_start|>Neural machine translation system for Lezgian, Russian and Azerbaijani languages: We release the first neural machine translation system for translation between Russian, Azerbaijani and the endangered Lezgian languages, as well as monolingual and parallel datasets collected and aligned for training and evaluating the system. Multiple experiments are conducted to identify how different sets of training language pairs and data domains can influence the resulting translation quality. We achieve BLEU scores of 26.14 for Lezgian-Azerbaijani, 22.89 for Azerbaijani-Lezgian, 29.48 for Lezgian-Russian and 24.25 for Russian-Lezgian pairs. The quality of zero-shot translation is assessed on a Large Language Model, showing its high level of fluency in Lezgian. However, the model often refuses to translate, justifying itself with its incompetence. We contribute our translation model along with the collected parallel and monolingual corpora and sentence encoder for the Lezgian language.<|reference_end|>
arxiv
@article{asvarov2024neural, title={Neural machine translation system for Lezgian, Russian and Azerbaijani languages}, author={Alidar Asvarov and Andrey Grabovoy}, journal={arXiv preprint arXiv:2410.05472}, year={2024}, archivePrefix={arXiv}, eprint={2410.05472}, primaryClass={cs.CL} }
asvarov2024neural
arxiv-666764
2410.05474
R-Bench: Are your Large Multimodal Model Robust to Real-world Corruptions?
<|reference_start|>R-Bench: Are your Large Multimodal Model Robust to Real-world Corruptions?: The outstanding performance of Large Multimodal Models (LMMs) has made them widely applied in vision-related tasks. However, various corruptions in the real world mean that images will not be as ideal as in simulations, presenting significant challenges for the practical application of LMMs. To address this issue, we introduce R-Bench, a benchmark focused on the **Real-world Robustness of LMMs**. Specifically, we: (a) model the complete link from user capture to LMMs reception, comprising 33 corruption dimensions, including 7 steps according to the corruption sequence, and 7 groups based on low-level attributes; (b) collect reference/distorted image dataset before/after corruption, including 2,970 question-answer pairs with human labeling; (c) propose comprehensive evaluation for absolute/relative robustness and benchmark 20 mainstream LMMs. Results show that while LMMs can correctly handle the original reference images, their performance is not stable when faced with distorted images, and there is a significant gap in robustness compared to the human visual system. We hope that R-Bench will inspire improving the robustness of LMMs, **extending them from experimental simulations to the real-world application**. Check https://q-future.github.io/R-Bench for details.<|reference_end|>
arxiv
@article{li2024r-bench:, title={R-Bench: Are your Large Multimodal Model Robust to Real-world Corruptions?}, author={Chunyi Li, Jianbo Zhang, Zicheng Zhang, Haoning Wu, Yuan Tian, Wei Sun, Guo Lu, Xiaohong Liu, Xiongkuo Min, Weisi Lin, Guangtao Zhai}, journal={arXiv preprint arXiv:2410.05474}, year={2024}, archivePrefix={arXiv}, eprint={2410.05474}, primaryClass={cs.CV cs.MM eess.IV} }
li2024r-bench:
arxiv-666765
2410.05479
Ensured: Explanations for Decreasing the Epistemic Uncertainty in Predictions
<|reference_start|>Ensured: Explanations for Decreasing the Epistemic Uncertainty in Predictions: This paper addresses a significant gap in explainable AI: the necessity of interpreting epistemic uncertainty in model explanations. Although current methods mainly focus on explaining predictions, with some including uncertainty, they fail to provide guidance on how to reduce the inherent uncertainty in these predictions. To overcome this challenge, we introduce new types of explanations that specifically target epistemic uncertainty. These include ensured explanations, which highlight feature modifications that can reduce uncertainty, and a categorisation of uncertain explanations (counter-potential, semi-potential, and super-potential) which explore alternative scenarios. Our work emphasises that epistemic uncertainty adds a crucial dimension to explanation quality, demanding evaluation based not only on prediction probability but also on uncertainty reduction. We introduce a new metric, ensured ranking, designed to help users identify the most reliable explanations by balancing trade-offs between uncertainty, probability, and competing alternative explanations. Furthermore, we extend the Calibrated Explanations method, incorporating tools that visualise how changes in feature values impact epistemic uncertainty. This enhancement provides deeper insights into model behaviour, promoting increased interpretability and appropriate trust in scenarios involving uncertain predictions.<|reference_end|>
arxiv
@article{löfström2024ensured:, title={Ensured: Explanations for Decreasing the Epistemic Uncertainty in Predictions}, author={Helena L\"ofstr\"om, Tuwe L\"ofstr\"om, Johan Hallberg Szabadvary}, journal={arXiv preprint arXiv:2410.05479}, year={2024}, archivePrefix={arXiv}, eprint={2410.05479}, primaryClass={cs.AI cs.LG} }
löfström2024ensured:
arxiv-666766
2410.05481
fPLSA: Learning Semantic Structures in Document Collections Using Foundation Models
<|reference_start|>fPLSA: Learning Semantic Structures in Document Collections Using Foundation Models: Humans have the ability to learn new tasks by inferring high-level concepts from existing solutions, then manipulating these concepts in lieu of the raw data. Can we automate this process by deriving latent semantic structures in a document collection using foundation models? We introduce fPLSA, a foundation-model-based Probabilistic Latent Semantic Analysis (PLSA) method that iteratively clusters and tags document segments based on document-level contexts. These tags can be used to model the structure of given documents and for hierarchical sampling of new texts. Our experiments on story writing, math, and multi-step reasoning datasets demonstrate that fPLSA tags help reconstruct the original texts better than existing tagging methods. Moreover, when used for hierarchical sampling, fPLSA produces more diverse outputs with a higher likelihood of hitting the correct answer than direct sampling and hierarchical sampling with existing tagging methods.<|reference_end|>
arxiv
@article{xu2024fplsa:, title={fPLSA: Learning Semantic Structures in Document Collections Using Foundation Models}, author={Weijia Xu, Nebojsa Jojic, Nicolas Le Roux}, journal={arXiv preprint arXiv:2410.05481}, year={2024}, archivePrefix={arXiv}, eprint={2410.05481}, primaryClass={cs.LG} }
xu2024fplsa:
arxiv-666767
2410.05483
High-Order Spectral Simulation of Dispersive Two-Dimensional Materials
<|reference_start|>High-Order Spectral Simulation of Dispersive Two-Dimensional Materials: Over the past twenty years, the field of plasmonics has been revolutionized with the isolation and utilization of two-dimensional materials, particularly graphene. Consequently there is significant interest in rapid, robust, and highly accurate computational schemes which can incorporate such materials. Standard volumetric approaches can be contemplated, but these require huge computational resources. Here we describe an algorithm which addresses this issue for nonlocal models of the electromagnetic response of graphene. Our methodology not only approximates the graphene layer with a surface current, but also reformulates the governing volumetric equations in terms of surface quantities using Dirichlet-Neumann Operators. We have recently shown how these surface equations can be numerically simulated in an efficient, stable, and accurate fashion using a High-Order Perturbation of Envelopes methodology. We extend these results to the nonlocal model mentioned above, and using an implementation of this algorithm, we study absorbance spectra of TM polarized plane-waves scattered by a periodic grid of graphene ribbons.<|reference_end|>
arxiv
@article{nicholls2024high-order, title={High-Order Spectral Simulation of Dispersive Two-Dimensional Materials}, author={David Nicholls and Tianyu Zhu}, journal={arXiv preprint arXiv:2410.05483}, year={2024}, archivePrefix={arXiv}, eprint={2410.05483}, primaryClass={math.NA cs.NA} }
nicholls2024high-order
arxiv-666768
2410.05484
Neural Networks Decoded: Targeted and Robust Analysis of Neural Network Decisions via Causal Explanations and Reasoning
<|reference_start|>Neural Networks Decoded: Targeted and Robust Analysis of Neural Network Decisions via Causal Explanations and Reasoning: Despite their success and widespread adoption, the opaque nature of deep neural networks (DNNs) continues to hinder trust, especially in critical applications. Current interpretability solutions often yield inconsistent or oversimplified explanations, or require model changes that compromise performance. In this work, we introduce TRACER, a novel method grounded in causal inference theory designed to estimate the causal dynamics underpinning DNN decisions without altering their architecture or compromising their performance. Our approach systematically intervenes on input features to observe how specific changes propagate through the network, affecting internal activations and final outputs. Based on this analysis, we determine the importance of individual features, and construct a high-level causal map by grouping functionally similar layers into cohesive causal nodes, providing a structured and interpretable view of how different parts of the network influence the decisions. TRACER further enhances explainability by generating counterfactuals that reveal possible model biases and offer contrastive explanations for misclassifications. Through comprehensive evaluations across diverse datasets, we demonstrate TRACER's effectiveness over existing methods and show its potential for creating highly compressed yet accurate models, illustrating its dual versatility in both understanding and optimizing DNNs.<|reference_end|>
arxiv
@article{diallo2024neural, title={Neural Networks Decoded: Targeted and Robust Analysis of Neural Network Decisions via Causal Explanations and Reasoning}, author={Alec F. Diallo, Vaishak Belle, Paul Patras}, journal={arXiv preprint arXiv:2410.05484}, year={2024}, archivePrefix={arXiv}, eprint={2410.05484}, primaryClass={cs.LG cs.AI stat.ME} }
diallo2024neural
arxiv-666769
2410.05486
Multi-Window Approaches for Direct and Stable STFT Phase Retrieval
<|reference_start|>Multi-Window Approaches for Direct and Stable STFT Phase Retrieval: Phase retrieval from phaseless short-time Fourier transform (STFT) measurements is known to be inherently unstable when measurements are taken with respect to a single window. While an explicit inversion formula exists, it is useless in practice due to its instability. In this paper, we overcome this lack of stability by presenting two multi-window approaches that rely on a "good coverage" of the time-frequency plane by the ambiguity functions of the windows. The first is to use the fractional Fourier transform of a dilated Gauss function with various angles as window functions. The essential support of a superposition of the ambiguity function from such window functions is of a "daffodil shape", which converges to a large disc as more angles are used, yielding a much broader coverage in the time-frequency domain. The second approach uses Hermite functions of various degrees as the window functions. The larger the degree, the wider the ambiguity function but with zeros on circles in the time-frequency domain. Combining Hermite functions of different degrees, we can achieve a wide coverage with zeros compensated by the essential support of the ambiguity function from other Hermite windows. Taking advantage of these multi-window procedures, we can stably perform STFT phase retrieval using the direct inversion formula.<|reference_end|>
arxiv
@article{alaifari2024multi-window, title={Multi-Window Approaches for Direct and Stable STFT Phase Retrieval}, author={Rima Alaifari and Yunan Yang}, journal={arXiv preprint arXiv:2410.05486}, year={2024}, archivePrefix={arXiv}, eprint={2410.05486}, primaryClass={math.FA cs.NA math.NA} }
alaifari2024multi-window
arxiv-666770
2410.05487
Group Fairness Metrics for Community Detection Methods in Social Networks
<|reference_start|>Group Fairness Metrics for Community Detection Methods in Social Networks: Understanding community structure has played an essential role in explaining network evolution, as nodes join communities which connect further to form large-scale complex networks. In real-world networks, nodes are often organized into communities based on ethnicity, gender, race, or wealth, leading to structural biases and inequalities. Community detection (CD) methods use network structure and nodes' attributes to identify communities, and can produce biased outcomes if they fail to account for structural inequalities, especially affecting minority groups. In this work, we propose group fairness metrics ($\Phi^{F*}_{p}$) to evaluate CD methods from a fairness perspective. We also conduct a comparative analysis of existing CD methods, focusing on the performance-fairness trade-off, to determine whether certain methods favor specific types of communities based on their size, density, or conductance. Our findings reveal that the trade-off varies significantly across methods, with no specific type of method consistently outperforming others. The proposed metrics and insights will help develop and evaluate fair and high performing CD methods.<|reference_end|>
arxiv
@article{de vink2024group, title={Group Fairness Metrics for Community Detection Methods in Social Networks}, author={Elze de Vink, Akrati Saxena}, journal={arXiv preprint arXiv:2410.05487}, year={2024}, archivePrefix={arXiv}, eprint={2410.05487}, primaryClass={cs.SI} }
de vink2024group
arxiv-666771
2410.05488
Automatic Instantiation of Assurance Cases from Patterns Using Large Language Models
<|reference_start|>Automatic Instantiation of Assurance Cases from Patterns Using Large Language Models: An assurance case is a structured set of arguments supported by evidence, demonstrating that a system's non-functional requirements (e.g., safety, security, reliability) have been correctly implemented. Assurance case patterns serve as templates derived from previous successful assurance cases, aimed at facilitating the creation of new assurance cases. Despite the use of these patterns to generate assurance cases, their instantiation remains a largely manual and error-prone process that heavily relies on domain expertise. Thus, exploring techniques to support their automatic instantiation becomes crucial. This study aims to investigate the potential of Large Language Models (LLMs) in automating the generation of assurance cases that comply with specific patterns. Specifically, we formalize assurance case patterns using predicate-based rules and then utilize LLMs, i.e., GPT-4o and GPT-4 Turbo, to automatically instantiate assurance cases from these formalized patterns. Our findings suggest that LLMs can generate assurance cases that comply with the given patterns. However, this study also highlights that LLMs may struggle with understanding some nuances related to pattern-specific relationships. While LLMs exhibit potential in the automatic generation of assurance cases, their capabilities still fall short compared to human experts. Therefore, a semi-automatic approach to instantiating assurance cases may be more practical at this time.<|reference_end|>
arxiv
@article{odu2024automatic, title={Automatic Instantiation of Assurance Cases from Patterns Using Large Language Models}, author={Oluwafemi Odu, Alvine B. Belle, Song Wang, Segla Kpodjedo, Timothy C. Lethbridge, Hadi Hemmati}, journal={arXiv preprint arXiv:2410.05488}, year={2024}, archivePrefix={arXiv}, eprint={2410.05488}, primaryClass={cs.SE} }
odu2024automatic
arxiv-666772
2410.05489
An Adaptive Reconstruction Method for Arbitrary High-Order Accuracy Using Discontinuity Feedback
<|reference_start|>An Adaptive Reconstruction Method for Arbitrary High-Order Accuracy Using Discontinuity Feedback: This paper introduces an efficient class of adaptive stencil extension reconstruction methods based on a discontinuity feedback factor, addressing the challenges of weak robustness and high computational cost in high-order schemes, particularly those of 7th-order or above. Two key innovations are presented: The accuracy order adaptively increases from the lowest level based on local stencil smoothness, contrasting with conventional methods like Weighted Essentially Non-Oscillatory (WENO) and Monotonic Upstream-Centered Scheme for Conservation Laws (MUSCL) limiters, which typically reduce order from the highest level. The Discontinuity Feedback Factor (DF) serves a dual purpose: detecting sub-cell discontinuity strength and explicitly incorporating into the reconstruction process as a local smoothness measure. This approach eliminates the need for computationally expensive smoothness indicators often required in very high-order schemes, such as 9th-order schemes, and can be easily generalized to arbitrary high-order schemes. Rigorous test cases, including a Mach 20000 jet, demonstrate the exceptional robustness of this approach.<|reference_end|>
arxiv
@article{zhang2024an, title={An Adaptive Reconstruction Method for Arbitrary High-Order Accuracy Using Discontinuity Feedback}, author={Hong Zhang, Yue Zhao, Xing Ji, Kun Xu}, journal={arXiv preprint arXiv:2410.05489}, year={2024}, archivePrefix={arXiv}, eprint={2410.05489}, primaryClass={math.NA cs.NA} }
zhang2024an
arxiv-666773
2410.05490
Nonlinear High-Pass Filters
<|reference_start|>Nonlinear High-Pass Filters: Linear high-pass phenomena matter in signal processing, circuits, and control. In nonlinear systems, however, there is no working definition of high-pass behavior. Any definition would have to agree with the existing theory on linear systems and offer concrete benefits for nonlinear systems above and beyond existing nonlinear theory. To satisfy these two requirements, we propose to define: a nonlinear input-output system is high-pass if its output is stable with respect to the derivative of the input. We first show that this definition generalizes high-pass resistor-capacitor circuit analysis to accommodate nonlinear resistors. We then show that this definition generalizes the steady-state disturbance rejection property of integral feedback controllers for linear systems. The theoretical payoff is that low-frequency disturbance rejection is captured by a quantitative, non-asymptotic output cost bound. Finally, we raise theoretical questions about compositionality and noncommutativity of nonlinear operators.<|reference_end|>
arxiv
@article{kuang2024nonlinear, title={Nonlinear High-Pass Filters}, author={Simon Kuang and Xinfan Lin}, journal={arXiv preprint arXiv:2410.05490}, year={2024}, archivePrefix={arXiv}, eprint={2410.05490}, primaryClass={eess.SY cs.SY} }
kuang2024nonlinear
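The kuang2024nonlinear record above defines high-pass behavior by requiring the output to be stable with respect to the derivative of the input. As a point of reference only, the sketch below simulates the classical discrete-time linear RC high-pass filter that this definition is meant to generalize; the sampling step, RC constant, and test signals are illustrative assumptions, not values from the paper.

```python
# Minimal sketch (illustrative assumptions only): a first-order digital RC
# high-pass filter, the linear baseline the nonlinear definition generalizes.
import numpy as np

def rc_highpass(x, dt=1e-3, rc=0.05):
    """First-order digital high-pass: y[k] = a * (y[k-1] + x[k] - x[k-1])."""
    a = rc / (rc + dt)
    y = np.zeros_like(x)
    for k in range(1, len(x)):
        y[k] = a * (y[k - 1] + x[k] - x[k - 1])
    return y

t = np.arange(0.0, 1.0, 1e-3)
slow = np.sin(2 * np.pi * 1 * t)     # unit-amplitude input with a small derivative
fast = np.sin(2 * np.pi * 200 * t)   # unit-amplitude input with a large derivative

# Both inputs have the same amplitude, but the output is governed by how fast
# the input changes: the slowly varying signal is attenuated, the fast one passes.
print("max|y| slow:", round(float(np.max(np.abs(rc_highpass(slow)))), 3))
print("max|y| fast:", round(float(np.max(np.abs(rc_highpass(fast)))), 3))
```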
arxiv-666774
2410.05491
Pre-Ictal Seizure Prediction Using Personalized Deep Learning
<|reference_start|>Pre-Ictal Seizure Prediction Using Personalized Deep Learning: Introduction: Approximately 23 million or 30% of epilepsy patients worldwide suffer from drug-resistant epilepsy (DRE). The unpredictability of seizure occurrences, which causes safety issues as well as social concerns, restricts the lifestyles of DRE patients. Surgical solutions and EEG-based solutions are very expensive, unreliable, invasive or impractical. The goal of this research was to apply improved technologies and methods to epilepsy patient physiological data and predict seizures up to two hours before onset, enabling non-invasive, affordable seizure prediction for DRE patients. Methods: This research used a 1D Convolutional Neural Network-Based Bidirectional Long Short-Term Memory network that was trained on a diverse set of epileptic patient physiological data to predict seizures. Transfer learning was further utilized to personalize and optimize predictions for specific patients. Clinical data was retrospectively obtained for nine epilepsy patients via wearable devices over a period of about three to five days from a prospectively maintained database. The physiological data included 54 seizure occurrences and included heart rate, blood volume pulse, accelerometry, body temperature, and electrodermal activity. Results and Conclusion: A general deep-learning model trained on the physiological data with randomly sampled test data achieved an accuracy of 91.94%. However, such a generalized deep learning model had varied performances on data from unseen patients. When the general model was personalized (further trained) with patient-specific data, the personalized model achieved significantly improved performance with accuracies as high as 97%. This preliminary research shows that patient-specific personalization may be a viable approach to achieve affordable, non-invasive seizure prediction that can improve the quality of life for DRE patients.<|reference_end|>
arxiv
@article{jaddu2024pre-ictal, title={Pre-Ictal Seizure Prediction Using Personalized Deep Learning}, author={Shriya Jaddu, Sidh Jaddu, Camilo Gutierrez, and Quincy K. Tran}, journal={arXiv preprint arXiv:2410.05491}, year={2024}, archivePrefix={arXiv}, eprint={2410.05491}, primaryClass={cs.LG} }
jaddu2024pre-ictal
arxiv-666775
2410.05493
Transformers learn variable-order Markov chains in-context
<|reference_start|>Transformers learn variable-order Markov chains in-context: Large language models have demonstrated impressive in-context learning (ICL) capability. However, it is still unclear how the underlying transformers accomplish it, especially in more complex scenarios. Toward this goal, several recent works studied how transformers learn fixed-order Markov chains (FOMC) in context, yet natural languages are more suitably modeled by variable-order Markov chains (VOMC), i.e., context trees (CTs). In this work, we study the ICL of VOMC by viewing language modeling as a form of data compression and focus on small alphabets and low-order VOMCs. This perspective allows us to leverage mature compression algorithms, such as context-tree weighting (CTW) and prediction by partial matching (PPM) algorithms as baselines, the former of which is Bayesian optimal for a class of CTW priors. We empirically observe a few phenomena: 1) Transformers can indeed learn to compress VOMC in-context, while PPM suffers significantly; 2) The performance of transformers is not very sensitive to the number of layers, and even a two-layer transformer can learn in-context quite well; and 3) Transformers trained and tested on non-CTW priors can significantly outperform the CTW algorithm. To explain these phenomena, we analyze the attention map of the transformers and extract two mechanisms, on which we provide two transformer constructions: 1) A construction with $D+2$ layers that can mimic the CTW algorithm accurately for CTs of maximum order $D$, 2) A 2-layer transformer that utilizes the feed-forward network for probability blending. One distinction from the FOMC setting is that a counting mechanism appears to play an important role. We implement these synthetic transformer layers and show that such hybrid transformers can match the ICL performance of transformers, and more interestingly, some of them can perform even better despite the much-reduced parameter sets.<|reference_end|>
arxiv
@article{zhou2024transformers, title={Transformers learn variable-order Markov chains in-context}, author={Ruida Zhou, Chao Tian, Suhas Diggavi}, journal={arXiv preprint arXiv:2410.05493}, year={2024}, archivePrefix={arXiv}, eprint={2410.05493}, primaryClass={cs.LG cs.IT math.IT} }
zhou2024transformers
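The zhou2024transformers record above concerns in-context learning of variable-order Markov chains (context trees). The sketch below is only a toy illustration of what such a source is: it samples a binary sequence whose next-symbol distribution depends on the longest matching context, so the effective order varies with the history. The particular tree and probabilities are invented for illustration and are not taken from the paper.

```python
# Minimal sketch (hypothetical context tree): sampling from a variable-order
# Markov chain over {0, 1} where the longest matching suffix picks P(next = 1).
import random

CONTEXT_TREE = {
    (1,):   0.8,   # after a 1, emit 1 with probability 0.8 (order 1)
    (0, 0): 0.1,   # after 0,0 emit 1 with probability 0.1 (order 2)
    (1, 0): 0.5,   # after 1,0 emit 1 with probability 0.5 (order 2)
}

def next_prob(history):
    """Return P(next symbol = 1) under the longest matching context."""
    for order in (2, 1):
        ctx = tuple(history[-order:])
        if len(ctx) == order and ctx in CONTEXT_TREE:
            return CONTEXT_TREE[ctx]
    return 0.5  # root distribution when no context matches yet

def sample(n, seed=0):
    rng = random.Random(seed)
    seq = []
    for _ in range(n):
        seq.append(1 if rng.random() < next_prob(seq) else 0)
    return seq

print("".join(map(str, sample(30))))
```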
arxiv-666776
2410.05494
Tactile Displays Driven by Projected Light
<|reference_start|>Tactile Displays Driven by Projected Light: Tactile displays that lend tangible form to digital content could profoundly transform how we interact with computers, much like visual displays have driven successive revolutions in computing over the past 60 years. However, creating tactile displays with the actuation speeds, dynamic ranges, and resolutions that are required for perceptual fidelity has proved challenging. Here, we present a tactile display that directly converts projected light into visible tactile patterns using an energetically passive, photomechanical surface populated with arrays of millimeter-scale optotactile pixels. The pixels transduce incident light into mechanical displacements through rapid, light-stimulated thermal gas expansion, yielding displacements of up to 1 millimeter and response times of 2 to 100 milliseconds. Our use of projected light for power transmission and addressing enables these displays to be scaled in size and resolution at sustainable cost and complexity. We demonstrate devices with up to 1,511 independently addressable pixels. Perceptual studies confirm the capacity of the display to accurately reproduce tactile patterns in location, timing, frequency, and structure. This research establishes a foundation for practical, versatile high-resolution tactile displays driven by light.<|reference_end|>
arxiv
@article{linnander2024tactile, title={Tactile Displays Driven by Projected Light}, author={Max Linnander, Dustin Goetz, Gregory Reardon, Elliot Hawkes, and Yon Visell}, journal={arXiv preprint arXiv:2410.05494}, year={2024}, archivePrefix={arXiv}, eprint={2410.05494}, primaryClass={cs.ET cs.HC cs.RO physics.optics} }
linnander2024tactile
arxiv-666777
2410.05495
Self-rationalization improves LLM as a fine-grained judge
<|reference_start|>Self-rationalization improves LLM as a fine-grained judge: LLM-as-a-judge models have been used for evaluating both human and AI generated content, specifically by providing scores and rationales. Rationales, in addition to increasing transparency, help models learn to calibrate their judgments. Enhancing a model's rationale can therefore improve its calibration abilities and ultimately the ability to score content. We introduce Self-Rationalization, an iterative process of improving the rationales for the judge models, which consequently improves the score for fine-grained customizable scoring criteria (i.e., likert-scale scoring with arbitrary evaluation criteria). Self-rationalization works by having the model generate multiple judgments with rationales for the same input, curating a preference pair dataset from its own judgements, and iteratively fine-tuning the judge via DPO. Intuitively, this approach allows the judge model to self-improve by learning from its own rationales, leading to better alignment and evaluation accuracy. After just two iterations -- while only relying on examples in the training set -- human evaluation shows that our judge model learns to produce higher quality rationales, with a win rate of $62\%$ on average compared to models just trained via SFT on rationales. This judge model also achieves high scoring accuracy on BigGen Bench and Reward Bench, outperforming even bigger-sized models trained using SFT with rationales, self-consistency or best-of-$N$ sampling by $3\%$ to $9\%$.<|reference_end|>
arxiv
@article{trivedi2024self-rationalization, title={Self-rationalization improves LLM as a fine-grained judge}, author={Prapti Trivedi, Aditya Gulati, Oliver Molenschot, Meghana Arakkal Rajeev, Rajkumar Ramamurthy, Keith Stevens, Tanveesh Singh Chaudhery, Jahnavi Jambholkar, James Zou, Nazneen Rajani}, journal={arXiv preprint arXiv:2410.05495}, year={2024}, archivePrefix={arXiv}, eprint={2410.05495}, primaryClass={cs.CL} }
trivedi2024self-rationalization
arxiv-666778
2410.05496
Intuitions of Compromise: Utilitarianism vs Contractualism
<|reference_start|>Intuitions of Compromise: Utilitarianism vs Contractualism: What is the best compromise in a situation where different people value different things? The most commonly accepted method for answering this question -- in fields across the behavioral and social sciences, decision theory, philosophy, and artificial intelligence development -- is simply to add up utilities associated with the different options and pick the solution with the largest sum. This ``utilitarian'' approach seems like the obvious, theory-neutral way of approaching the problem. But there is an important, though often-ignored, alternative: a ``contractualist'' approach, which advocates for an agreement-driven method of deciding. Remarkably, no research has presented empirical evidence directly comparing the intuitive plausibility of these two approaches. In this paper, we systematically explore the proposals suggested by each algorithm (the ``Utilitarian Sum'' and the contractualist ``Nash Product''), using a paradigm that applies those algorithms to aggregating preferences across groups in a social decision-making context. While the dominant approach to value aggregation up to now has been utilitarian, we find that people strongly prefer the aggregations recommended by the contractualist algorithm. Finally, we compare the judgments of large language models (LLMs) to that of our (human) participants, finding important misalignment between model and human preferences.<|reference_end|>
arxiv
@article{moore2024intuitions, title={Intuitions of Compromise: Utilitarianism vs. Contractualism}, author={Jared Moore, Yejin Choi, Sydney Levine}, journal={arXiv preprint arXiv:2410.05496}, year={2024}, archivePrefix={arXiv}, eprint={2410.05496}, primaryClass={cs.AI cs.GT} }
moore2024intuitions
arxiv-666779
2410.05497
EgoQR: Efficient QR Code Reading in Egocentric Settings
<|reference_start|>EgoQR: Efficient QR Code Reading in Egocentric Settings: QR codes have become ubiquitous in daily life, enabling rapid information exchange. With the increasing adoption of smart wearable devices, there is a need for efficient, frictionless QR code reading capabilities from egocentric points of view. However, adapting existing phone-based QR code readers to egocentric images poses significant challenges. Code reading from egocentric images brings unique challenges such as wide field-of-view, code distortion and lack of visual feedback as compared to phones, where users can adjust the position and framing. Furthermore, wearable devices impose constraints on resources like compute, power and memory. To address these challenges, we present EgoQR, a novel system for reading QR codes from egocentric images that is well suited for deployment on wearable devices. Our approach consists of two primary components: detection and decoding, designed to operate on high-resolution images on the device with minimal power consumption and added latency. The detection component efficiently locates potential QR codes within the image, while our enhanced decoding component extracts and interprets the encoded information. We incorporate innovative techniques to handle the specific challenges of egocentric imagery, such as varying perspectives, wider field of view, and motion blur. We evaluate our approach on a dataset of egocentric images, demonstrating a 34% improvement in reading the code compared to existing state-of-the-art QR code readers.<|reference_end|>
arxiv
@article{moslehpour2024egoqr:, title={EgoQR: Efficient QR Code Reading in Egocentric Settings}, author={Mohsen Moslehpour, Yichao Lu, Pierce Chuang, Ashish Shenoy, Debojeet Chatterjee, Abhay Harpale, Srihari Jayakumar, Vikas Bhardwaj, Seonghyeon Nam, Anuj Kumar}, journal={arXiv preprint arXiv:2410.05497}, year={2024}, archivePrefix={arXiv}, eprint={2410.05497}, primaryClass={cs.CV} }
moslehpour2024egoqr:
arxiv-666780
2410.05499
Unitary convolutions for learning on graphs and groups
<|reference_start|>Unitary convolutions for learning on graphs and groups: Data with geometric structure is ubiquitous in machine learning, often arising from fundamental symmetries in a domain, such as permutation-invariance in graphs and translation-invariance in images. Group-convolutional architectures, which encode symmetries as inductive bias, have shown great success in applications, but can suffer from instabilities as their depth increases and often struggle to learn long-range dependencies in data. For instance, graph neural networks experience instability due to the convergence of node representations (over-smoothing), which can occur after only a few iterations of message-passing, reducing their effectiveness in downstream tasks. Here, we propose and study unitary group convolutions, which allow for deeper networks that are more stable during training. The main focus of the paper is graph neural networks, where we show that unitary graph convolutions provably avoid over-smoothing. Our experimental results confirm that unitary graph convolutional networks achieve competitive performance on benchmark datasets compared to state-of-the-art graph neural networks. We complement our analysis of the graph domain with the study of general unitary convolutions and analyze their role in enhancing stability in general group convolutional architectures.<|reference_end|>
arxiv
@article{kiani2024unitary, title={Unitary convolutions for learning on graphs and groups}, author={Bobak T. Kiani, Lukas Fesser, Melanie Weber}, journal={arXiv preprint arXiv:2410.05499}, year={2024}, archivePrefix={arXiv}, eprint={2410.05499}, primaryClass={cs.LG} }
kiani2024unitary
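The kiani2024unitary record above studies unitary group and graph convolutions. The sketch below is not the paper's construction; it only illustrates one standard way to parametrize an orthogonal (real unitary) weight matrix, via the matrix exponential of a skew-symmetric matrix, and to use it inside a toy message-passing step. The sizes, adjacency normalization, and all names are hypothetical.

```python
# Minimal sketch (hypothetical sizes): an orthogonal weight W = exp(P - P^T)
# used as the feature transform of a toy graph message-passing step.
import numpy as np
from scipy.linalg import expm

rng = np.random.default_rng(0)
d = 8                                    # feature dimension
P = rng.normal(size=(d, d))              # unconstrained parameters
W = expm(P - P.T)                        # exponential of a skew-symmetric matrix => orthogonal

print("||W^T W - I|| =", np.linalg.norm(W.T @ W - np.eye(d)))   # numerically ~0

# Toy message-passing step X' = A_hat @ X @ W with a row-normalized adjacency.
n = 5
A = (rng.random((n, n)) < 0.4).astype(float)
np.fill_diagonal(A, 1.0)                 # self-loops so every row is nonzero
A_hat = A / A.sum(axis=1, keepdims=True)
X = rng.normal(size=(n, d))
X_next = A_hat @ X @ W

# The orthogonal transform alone preserves per-node feature norms.
print(np.allclose(np.linalg.norm(X @ W, axis=1), np.linalg.norm(X, axis=1)))
```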
arxiv-666781
2410.05500
Residual Kolmogorov-Arnold Network for Enhanced Deep Learning
<|reference_start|>Residual Kolmogorov-Arnold Network for Enhanced Deep Learning: Despite the strong performance in many computer vision tasks, Convolutional Neural Networks (CNNs) can sometimes struggle to efficiently capture long-range, complex non-linear dependencies in deeper layers of the network. We address this limitation by introducing Residual KAN, which incorporates the Kolmogorov-Arnold Network (KAN) within the CNN framework as a residual component. Our approach uses Chebyshev polynomials as the basis for KAN convolutions that enables more expressive and adaptive feature representations while maintaining computational efficiency. The proposed RKAN blocks, when integrated into established architectures such as ResNet and DenseNet, offer consistent improvements over the baseline models on various well-known benchmarks. Our results demonstrate the potential of RKAN to enhance the capabilities of deep CNNs in visual data.<|reference_end|>
arxiv
@article{yu2024residual, title={Residual Kolmogorov-Arnold Network for Enhanced Deep Learning}, author={Ray Congrui Yu, Sherry Wu, Jiang Gui}, journal={arXiv preprint arXiv:2410.05500}, year={2024}, archivePrefix={arXiv}, eprint={2410.05500}, primaryClass={cs.CV cs.AI cs.LG} }
yu2024residual
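The yu2024residual record above describes residual blocks built from Kolmogorov-Arnold layers with a Chebyshev polynomial basis. The sketch below gives a minimal NumPy forward pass of such a layer used as a residual branch, y = x + KAN(x); the dimensions, polynomial degree, tanh input squashing, and initialization are assumptions for illustration, not the paper's implementation.

```python
# Minimal sketch (hypothetical shapes): forward pass of a Chebyshev-basis
# KAN-style layer used as a residual branch.
import numpy as np

def chebyshev_features(x, degree):
    """Stack T_0(x)..T_degree(x) via the recurrence T_{n+1} = 2*x*T_n - T_{n-1}."""
    x = np.tanh(x)                                    # squash inputs into [-1, 1]
    T = [np.ones_like(x), x]
    for _ in range(2, degree + 1):
        T.append(2 * x * T[-1] - T[-2])
    return np.stack(T[: degree + 1], axis=-1)         # (batch, d_in, degree + 1)

def kan_residual_block(x, coeffs):
    """Residual KAN branch; coeffs has shape (d_in, degree + 1, d_out) with d_out == d_in."""
    phi = chebyshev_features(x, coeffs.shape[1] - 1)  # (batch, d_in, degree + 1)
    out = np.einsum("bik,iko->bo", phi, coeffs)       # sum of learned 1-D functions
    return x + out                                    # residual connection

rng = np.random.default_rng(0)
batch, d, degree = 4, 16, 3
x = rng.normal(size=(batch, d))
coeffs = rng.normal(scale=0.1, size=(d, degree + 1, d))   # learnable in a real model
print(kan_residual_block(x, coeffs).shape)                # (4, 16)
```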
arxiv-666782
2410.05501
Timeliness in NextG Spectrum Sharing under Jamming Attacks with Deep Learning
<|reference_start|>Timeliness in NextG Spectrum Sharing under Jamming Attacks with Deep Learning: We consider the communication of time-sensitive information in NextG spectrum sharing where a deep learning-based classifier is used to identify transmission attempts. While the transmitter seeks for opportunities to use the spectrum without causing interference to an incumbent user, an adversary uses another deep learning classifier to detect and jam the signals, subject to an average power budget. We consider timeliness objectives of NextG communications and study the Age of Information (AoI) under different scenarios of spectrum sharing and jamming, analyzing the effect of transmit control, transmit probability, and channel utilization subject to wireless channel and jamming effects. The resulting signal-to-noise-plus-interference (SINR) determines the success of spectrum sharing, but also affects the accuracy of the adversary's detection, making it more likely for the jammer to successfully identify and jam the communication. Our results illustrate the benefits of spectrum sharing for anti-jamming by exemplifying how a limited-power adversary is motivated to decrease its jamming power as the channel occupancy rises in NextG spectrum sharing with timeliness objectives.<|reference_end|>
arxiv
@article{costa2024timeliness, title={Timeliness in NextG Spectrum Sharing under Jamming Attacks with Deep Learning}, author={Maice Costa and Yalin E. Sagduyu}, journal={In proceedings on IEEE VTC-Fall 2024 Conference}, year={2024}, archivePrefix={arXiv}, eprint={2410.05501}, primaryClass={cs.IT math.IT} }
costa2024timeliness
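The costa2024timeliness record above evaluates Age of Information (AoI) under spectrum sharing and jamming. The sketch below is a generic slotted AoI simulation rather than the paper's model: an attempt is made with probability p_tx and succeeds with probability p_success (standing in for channel quality and jamming), and the age resets to one slot on each successful delivery. All probabilities and slot counts are hypothetical.

```python
# Minimal sketch (hypothetical parameters): average Age of Information in a
# slotted system where heavier jamming is modeled as a lower success probability.
import numpy as np

def average_aoi(p_tx, p_success, n_slots=100_000, seed=0):
    rng = np.random.default_rng(seed)
    age, total = 1, 0
    for _ in range(n_slots):
        total += age
        delivered = (rng.random() < p_tx) and (rng.random() < p_success)
        age = 1 if delivered else age + 1     # reset on delivery, otherwise grow
    return total / n_slots

for p_success in (0.9, 0.5, 0.2):             # stronger jamming => lower success probability
    print(f"p_success={p_success:.1f}  avg AoI ~ {average_aoi(0.8, p_success):.2f}")
```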
arxiv-666783
2410.05506
Privacy Vulnerabilities in Marginals-based Synthetic Data
<|reference_start|>Privacy Vulnerabilities in Marginals-based Synthetic Data: When acting as a privacy-enhancing technology, synthetic data generation (SDG) aims to maintain a resemblance to the real data while excluding personally-identifiable information. Many SDG algorithms provide robust differential privacy (DP) guarantees to this end. However, we show that the strongest class of SDG algorithms--those that preserve \textit{marginal probabilities}, or similar statistics, from the underlying data--leak information about individuals that can be recovered more efficiently than previously understood. We demonstrate this by presenting a novel membership inference attack, MAMA-MIA, and evaluate it against three seminal DP SDG algorithms: MST, PrivBayes, and Private-GSD. MAMA-MIA leverages knowledge of which SDG algorithm was used, allowing it to learn information about the hidden data more accurately, and orders-of-magnitude faster, than other leading attacks. We use MAMA-MIA to lend insight into existing SDG vulnerabilities. Our approach went on to win the first SNAKE (SaNitization Algorithm under attacK ... $\varepsilon$) competition.<|reference_end|>
arxiv
@article{golob2024privacy, title={Privacy Vulnerabilities in Marginals-based Synthetic Data}, author={Steven Golob, Sikha Pentyala, Anuar Maratkhan, Martine De Cock}, journal={arXiv preprint arXiv:2410.05506}, year={2024}, archivePrefix={arXiv}, eprint={2410.05506}, primaryClass={cs.CR cs.LG} }
golob2024privacy
arxiv-666784
2410.05507
Structural Constraints for Physics-augmented Learning
<|reference_start|>Structural Constraints for Physics-augmented Learning: When the physics is wrong, physics-informed machine learning becomes physics-misinformed machine learning. A powerful black-box model should not be able to conceal misconceived physics. We propose two criteria that can be used to assert the integrity of a hybrid (physics plus black-box) model: 0) the black-box model should be unable to replicate the physical model, and 1) any best-fit hybrid model has the same physical parameter as a best-fit standalone physics model. We demonstrate them for a sample nonlinear mechanical system approximated by its small-signal linearization.<|reference_end|>
arxiv
@article{kuang2024structural, title={Structural Constraints for Physics-augmented Learning}, author={Simon Kuang and Xinfan Lin}, journal={arXiv preprint arXiv:2410.05507}, year={2024}, archivePrefix={arXiv}, eprint={2410.05507}, primaryClass={cs.LG cs.SY eess.SY} }
kuang2024structural
arxiv-666785
2410.05509
Quadratically-Regularized Distributed Optimal Transport on Graphs
<|reference_start|>Quadratically-Regularized Distributed Optimal Transport on Graphs: Optimal transport on a graph focuses on finding the most efficient way to transfer resources from one distribution to another while considering the graph's structure. This paper introduces a new distributed algorithm that solves the optimal transport problem on directed, strongly connected graphs, unlike previous approaches which were limited to bipartite graphs. Our algorithm incorporates quadratic regularization and guarantees convergence using the Alternating Direction Method of Multipliers (ADMM). Notably, it proves convergence not only with quadratic regularization but also in cases without it, whereas earlier works required strictly convex objective functions. In this approach, nodes are treated as agents that collaborate through local interactions to optimize the total transportation cost, relying only on information from their neighbors. Through numerical experiments, we show how quadratic regularization affects both convergence behavior and solution sparsity under different graph structures. Additionally, we provide a practical example that highlights the algorithm's robustness through its ability to adjust to topological changes in the graph.<|reference_end|>
arxiv
@article{mokhtari2024quadratically-regularized, title={Quadratically-Regularized Distributed Optimal Transport on Graphs}, author={Yacine Mokhtari, Emmanuel Moulay, Patrick Coirault, J\'er\^ome Le Ny}, journal={arXiv preprint arXiv:2410.05509}, year={2024}, archivePrefix={arXiv}, eprint={2410.05509}, primaryClass={math.OC cs.NA math.NA} }
mokhtari2024quadratically-regularized
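The mokhtari2024quadratically-regularized record above solves quadratically regularized optimal transport on directed graphs with a distributed ADMM scheme. The sketch below is only the centralized version of the underlying problem, written as a quadratically regularized minimum-cost flow and solved with cvxpy; the example graph, edge costs, supplies, and regularization weight lam are hypothetical, and no ADMM or distribution across agents is attempted.

```python
# Minimal sketch (hypothetical graph and data): centralized quadratically
# regularized transport on a small directed graph, min c^T f + (lam/2)||f||^2
# subject to flow conservation B f = b and f >= 0.
import numpy as np
import cvxpy as cp

edges = [(0, 1, 1.0), (1, 2, 1.0), (2, 3, 1.0), (3, 0, 1.0), (0, 2, 2.5), (1, 3, 2.5)]
n, m = 4, len(edges)

B = np.zeros((n, m))                    # node-edge incidence matrix
cost = np.zeros(m)
for j, (u, v, c) in enumerate(edges):
    B[u, j], B[v, j], cost[j] = 1.0, -1.0, c    # +1 leaves u, -1 enters v

b = np.array([1.0, 0.5, -0.5, -1.0])    # net supply (+) / demand (-), sums to zero
lam = 0.1                                # quadratic regularization weight

f = cp.Variable(m, nonneg=True)
problem = cp.Problem(cp.Minimize(cost @ f + (lam / 2) * cp.sum_squares(f)),
                     [B @ f == b])
problem.solve()

print("optimal cost:", round(problem.value, 4))
print("edge flows  :", np.round(f.value, 3))
```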
arxiv-666786
2410.05510
Evaluation of Intel Max GPUs for CGYRO-based fusion simulations
<|reference_start|>Evaluation of Intel Max GPUs for CGYRO-based fusion simulations: Intel Max GPUs are a new option available to CGYRO fusion simulation users. This paper outlines the changes that were needed to successfully run CGYRO on Intel Max 1550 GPUs on TACC's Stampede3 HPC system and presents benchmark results obtained there. Benchmark results were also run on Stampede3 Intel Max CPUs, as well as NVIDIA A100 and AMD MI250X GPUs at other major HPC systems. The Intel Max GPUs are shown to perform comparably to the other tested GPUs for smaller simulations but are noticeably slower for larger ones. Moreover, Intel Max GPUs are significantly faster than the tested Intel Max CPUs on Stampede3.<|reference_end|>
arxiv
@article{sfiligoi2024evaluation, title={Evaluation of Intel Max GPUs for CGYRO-based fusion simulations}, author={Igor Sfiligoi, Jeff Candy and Emily A. Belli}, journal={arXiv preprint arXiv:2410.05510}, year={2024}, archivePrefix={arXiv}, eprint={2410.05510}, primaryClass={cs.DC physics.plasm-ph} }
sfiligoi2024evaluation
arxiv-666787
2410.05514
Toward General Object-level Mapping from Sparse Views with 3D Diffusion Priors
<|reference_start|>Toward General Object-level Mapping from Sparse Views with 3D Diffusion Priors: Object-level mapping builds a 3D map of objects in a scene with detailed shapes and poses from multi-view sensor observations. Conventional methods struggle to build complete shapes and estimate accurate poses due to partial occlusions and sensor noise. They require dense observations to cover all objects, which is challenging to achieve in robotics trajectories. Recent work introduces generative shape priors for object-level mapping from sparse views, but is limited to single-category objects. In this work, we propose a General Object-level Mapping system, GOM, which leverages a 3D diffusion model as shape prior with multi-category support and outputs Neural Radiance Fields (NeRFs) for both texture and geometry for all objects in a scene. GOM includes an effective formulation to guide a pre-trained diffusion model with extra nonlinear constraints from sensor measurements without finetuning. We also develop a probabilistic optimization formulation to fuse multi-view sensor observations and diffusion priors for joint 3D object pose and shape estimation. Our GOM system demonstrates superior multi-category mapping performance from sparse views, and achieves more accurate mapping results compared to state-of-the-art methods on the real-world benchmarks. We will release our code: https://github.com/TRAILab/GeneralObjectMapping.<|reference_end|>
arxiv
@article{liao2024toward, title={Toward General Object-level Mapping from Sparse Views with 3D Diffusion Priors}, author={Ziwei Liao, Binbin Xu, Steven L. Waslander}, journal={arXiv preprint arXiv:2410.05514}, year={2024}, archivePrefix={arXiv}, eprint={2410.05514}, primaryClass={cs.CV cs.AI cs.RO} }
liao2024toward
arxiv-666788
2410.05522
Scalar Field Prediction on Meshes Using Interpolated Multi-Resolution Convolutional Neural Networks
<|reference_start|>Scalar Field Prediction on Meshes Using Interpolated Multi-Resolution Convolutional Neural Networks: Scalar fields, such as stress or temperature fields, are often calculated in shape optimization and design problems in engineering. For complex problems where shapes have varying topology and cannot be parametrized, data-driven scalar field prediction can be faster than traditional finite element methods. However, current data-driven techniques to predict scalar fields are limited to a fixed grid domain, instead of arbitrary mesh structures. In this work, we propose a method to predict scalar fields on arbitrary meshes. It uses a convolutional neural network whose feature maps at multiple resolutions are interpolated to node positions before being fed into a multilayer perceptron to predict solutions to partial differential equations at mesh nodes. The model is trained on finite element von Mises stress fields, and once trained it can estimate stress values at each node on any input mesh. Two shape datasets are investigated, and the model has strong performance on both, with a median R-squared value of 0.91. We also demonstrate the model on a temperature field in a heat conduction problem, where its predictions have a median R-squared value of 0.99. Our method provides a potential flexible alternative to finite element analysis in engineering design contexts. Code and datasets are available online.<|reference_end|>
arxiv
@article{ferguson2024scalar, title={Scalar Field Prediction on Meshes Using Interpolated Multi-Resolution Convolutional Neural Networks}, author={Kevin Ferguson, Andrew Gillman, James Hardin, Levent Burak Kara}, journal={arXiv preprint arXiv:2410.05522}, year={2024}, doi={10.1115/1.4065782}, archivePrefix={arXiv}, eprint={2410.05522}, primaryClass={cs.LG} }
ferguson2024scalar
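The ferguson2024scalar record above interpolates multi-resolution CNN feature maps to mesh node positions before a per-node MLP. The sketch below illustrates only that interpolation step, using plain bilinear interpolation in NumPy; the grid sizes, channel counts, and node coordinates are hypothetical and the sketch is not the paper's code.

```python
# Minimal sketch (hypothetical shapes): bilinear interpolation of feature grids
# at arbitrary mesh-node coordinates, producing per-node feature vectors.
import numpy as np

def interp_features(feat, xy):
    """feat: (H, W, C) feature grid on [0, 1]^2; xy: (N, 2) node coords in [0, 1]^2."""
    H, W, _ = feat.shape
    x = np.clip(xy[:, 0], 0.0, 1.0) * (W - 1)
    y = np.clip(xy[:, 1], 0.0, 1.0) * (H - 1)
    x0, y0 = np.floor(x).astype(int), np.floor(y).astype(int)
    x1, y1 = np.minimum(x0 + 1, W - 1), np.minimum(y0 + 1, H - 1)
    wx, wy = (x - x0)[:, None], (y - y0)[:, None]
    return ((1 - wx) * (1 - wy) * feat[y0, x0]
            + wx * (1 - wy) * feat[y0, x1]
            + (1 - wx) * wy * feat[y1, x0]
            + wx * wy * feat[y1, x1])              # (N, C) per-node features

rng = np.random.default_rng(0)
coarse = rng.normal(size=(8, 8, 16))               # low-resolution feature map
fine = rng.normal(size=(32, 32, 16))               # high-resolution feature map
nodes = rng.random((100, 2))                       # mesh node positions in [0, 1]^2

per_node = np.concatenate([interp_features(coarse, nodes),
                           interp_features(fine, nodes)], axis=1)
print(per_node.shape)   # (100, 32): multi-resolution features ready for a per-node MLP
```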
arxiv-666789
2410.05525
Generative Portrait Shadow Removal
<|reference_start|>Generative Portrait Shadow Removal: We introduce a high-fidelity portrait shadow removal model that can effectively enhance the image of a portrait by predicting its appearance under disturbing shadows and highlights. Portrait shadow removal is a highly ill-posed problem where multiple plausible solutions can be found based on a single image. While existing works have solved this problem by predicting the appearance residuals that can propagate local shadow distribution, such methods are often incomplete and lead to unnatural predictions, especially for portraits with hard shadows. We overcome the limitations of existing local propagation methods by formulating the removal problem as a generation task where a diffusion model learns to globally rebuild the human appearance from scratch as a condition of an input portrait image. For robust and natural shadow removal, we propose to train the diffusion model with a compositional repurposing framework: a pre-trained text-guided image generation model is first fine-tuned to harmonize the lighting and color of the foreground with a background scene by using a background harmonization dataset; and then the model is further fine-tuned to generate a shadow-free portrait image via a shadow-paired dataset. To overcome the limitation of losing fine details in the latent diffusion model, we propose a guided-upsampling network to restore the original high-frequency details (wrinkles and dots) from the input image. To enable our compositional training framework, we construct a high-fidelity and large-scale dataset using a lightstage capturing system and synthetic graphics simulation. Our generative framework effectively removes shadows caused by both self and external occlusions while maintaining original lighting distribution and high-frequency details. Our method also demonstrates robustness to diverse subjects captured in real environments.<|reference_end|>
arxiv
@article{yoon2024generative, title={Generative Portrait Shadow Removal}, author={Jae Shin Yoon, Zhixin Shu, Mengwei Ren, Xuaner Zhang, Yannick Hold-Geoffroy, Krishna Kumar Singh, He Zhang}, journal={arXiv preprint arXiv:2410.05525}, year={2024}, archivePrefix={arXiv}, eprint={2410.05525}, primaryClass={cs.CV} }
yoon2024generative
arxiv-666790
2410.05527
DOPL: Direct Online Preference Learning for Restless Bandits with Preference Feedback
<|reference_start|>DOPL: Direct Online Preference Learning for Restless Bandits with Preference Feedback: Restless multi-armed bandits (RMAB) has been widely used to model constrained sequential decision making problems, where the state of each restless arm evolves according to a Markov chain and each state transition generates a scalar reward. However, the success of RMAB crucially relies on the availability and quality of reward signals. Unfortunately, specifying an exact reward function in practice can be challenging and even infeasible. In this paper, we introduce Pref-RMAB, a new RMAB model in the presence of preference signals, where the decision maker only observes pairwise preference feedback rather than scalar reward from the activated arms at each decision epoch. Preference feedback, however, arguably contains less information than the scalar reward, which makes Pref-RMAB seemingly more difficult. To address this challenge, we present a direct online preference learning (DOPL) algorithm for Pref-RMAB to efficiently explore the unknown environments, adaptively collect preference data in an online manner, and directly leverage the preference feedback for decision-making. We prove that DOPL yields a sublinear regret. To the best of our knowledge, this is the first algorithm to ensure $\tilde{\mathcal{O}}(\sqrt{T\ln T})$ regret for RMAB with preference feedback. Experimental results further demonstrate the effectiveness of DOPL.<|reference_end|>
arxiv
@article{xiong2024dopl:, title={DOPL: Direct Online Preference Learning for Restless Bandits with Preference Feedback}, author={Guojun Xiong, Ujwal Dinesha, Debajoy Mukherjee, Jian Li, Srinivas Shakkottai}, journal={arXiv preprint arXiv:2410.05527}, year={2024}, archivePrefix={arXiv}, eprint={2410.05527}, primaryClass={cs.LG math.OC stat.ML} }
xiong2024dopl:
arxiv-666791
2410.05530
VisDiff: SDF-Guided Polygon Generation for Visibility Reconstruction and Recognition
<|reference_start|>VisDiff: SDF-Guided Polygon Generation for Visibility Reconstruction and Recognition: The capability to learn latent representations plays a key role in the effectiveness of recent machine learning methods. An active frontier in representation learning is understanding representations for combinatorial structures which may not admit well-behaved local neighborhoods or distance functions. For example, for polygons, slightly perturbing vertex locations might lead to significant changes in their combinatorial structure and may even lead to invalid polygons. In this paper, we investigate representations to capture the underlying combinatorial structures of polygons. Specifically, we study the open problem of Visibility Reconstruction: Given a visibility graph G, construct a polygon P whose visibility graph is G. We introduce VisDiff, a novel diffusion-based approach to reconstruct a polygon from its given visibility graph G. Our method first estimates the signed distance function (SDF) of P from G. Afterwards, it extracts ordered vertex locations that have the pairwise visibility relationship given by the edges of G. Our main insight is that going through the SDF significantly improves learning for reconstruction. In order to train VisDiff, we make two main contributions: (1) We design novel loss components for computing the visibility in a differentiable manner and (2) create a carefully curated dataset. We use this dataset to benchmark our method and achieve 21% improvement in F1-Score over standard methods. We also demonstrate effective generalization to out-of-distribution polygon types and show that learning a generative model allows us to sample the set of polygons with a given visibility graph. Finally, we extend our method to the related combinatorial problem of reconstruction from a triangulation. We achieve 95% classification accuracy of triangulation edges and a 4% improvement in Chamfer distance compared to current architectures.<|reference_end|>
arxiv
@article{moorthy2024visdiff:, title={VisDiff: SDF-Guided Polygon Generation for Visibility Reconstruction and Recognition}, author={Rahul Moorthy and Volkan Isler}, journal={arXiv preprint arXiv:2410.05530}, year={2024}, archivePrefix={arXiv}, eprint={2410.05530}, primaryClass={cs.CG cs.LG} }
moorthy2024visdiff:
arxiv-666792
2410.05533
Information Design with Unknown Prior
<|reference_start|>Information Design with Unknown Prior: Classical information design models (e.g., Bayesian persuasion and cheap talk) require players to have perfect knowledge of the prior distribution of the state of the world. Our paper studies repeated persuasion problems in which the information designer does not know the prior. The information designer learns to design signaling schemes from repeated interactions with the receiver. We design learning algorithms for the information designer to achieve no regret compared to using the optimal signaling scheme with known prior, under two models of the receiver's decision-making. (1) The first model assumes that the receiver knows the prior and can perform posterior update and best respond to signals. In this model, we design a learning algorithm for the information designer with $O(\log T)$ regret in the general case, and another algorithm with $\Theta(\log \log T)$ regret in the case where the receiver has only two actions. (2) The second model assumes that the receiver does not know the prior and employs a no-regret learning algorithm to take actions. We show that the information designer can achieve regret $O(\sqrt{\mathrm{rReg}(T) T})$, where $\mathrm{rReg}(T)=o(T)$ is an upper bound on the receiver's learning regret. Our work thus provides a learning foundation for the problem of information design with unknown prior.<|reference_end|>
arxiv
@article{lin2024information, title={Information Design with Unknown Prior}, author={Tao Lin, Ce Li}, journal={arXiv preprint arXiv:2410.05533}, year={2024}, archivePrefix={arXiv}, eprint={2410.05533}, primaryClass={cs.GT cs.DS cs.LG econ.TH} }
lin2024information
arxiv-666793
2410.05534
Optimizing Tensor Computation Graphs with Equality Saturation and Monte Carlo Tree Search
<|reference_start|>Optimizing Tensor Computation Graphs with Equality Saturation and Monte Carlo Tree Search: The real-world effectiveness of deep neural networks often depends on their latency, thereby necessitating optimization techniques that can reduce a model's inference time while preserving its performance. One popular approach is to sequentially rewrite the input computation graph into an equivalent but faster one by replacing individual subgraphs. This approach gives rise to the so-called phase-ordering problem in which the application of one rewrite rule can eliminate the possibility to apply an even better one later on. Recent work has shown that equality saturation, a technique from compiler optimization, can mitigate this issue by first building an intermediate representation (IR) that efficiently stores multiple optimized versions of the input program before extracting the best solution in a second step. In practice, however, memory constraints prevent the IR from capturing all optimized versions and thus reintroduce the phase-ordering problem in the construction phase. In this paper, we present a tensor graph rewriting approach that uses Monte Carlo tree search to build superior IRs by identifying the most promising rewrite rules. We also introduce a novel extraction algorithm that can provide fast and accurate runtime estimates of tensor programs represented in an IR. Our approach improves the inference speedup of neural networks by up to 11% compared to existing methods.<|reference_end|>
arxiv
@article{hartmann2024optimizing, title={Optimizing Tensor Computation Graphs with Equality Saturation and Monte Carlo Tree Search}, author={Jakob Hartmann, Guoliang He, Eiko Yoneki}, journal={arXiv preprint arXiv:2410.05534}, year={2024}, archivePrefix={arXiv}, eprint={2410.05534}, primaryClass={cs.LG cs.AI} }
hartmann2024optimizing
arxiv-666794
2410.05536
On Feature Decorrelation in Cloth-Changing Person Re-identification
<|reference_start|>On Feature Decorrelation in Cloth-Changing Person Re-identification: Cloth-changing person re-identification (CC-ReID) poses a significant challenge in computer vision. A prevailing approach is to prompt models to concentrate on causal attributes, like facial features and hairstyles, rather than confounding elements such as clothing appearance. Traditional methods to achieve this involve integrating multi-modality data or employing manually annotated clothing labels, which tend to complicate the model and require extensive human effort. In our study, we demonstrate that simply reducing feature correlations during training can significantly enhance the baseline model's performance. We theoretically elucidate this effect and introduce a novel regularization technique based on density ratio estimation. This technique aims to minimize feature correlation in the training process of cloth-changing ReID baselines. Our approach is model-independent, offering broad enhancements without needing additional data or labels. We validate our method through comprehensive experiments on prevalent CC-ReID datasets, showing its effectiveness in improving baseline models' generalization capabilities.<|reference_end|>
arxiv
@article{wang2024on, title={On Feature Decorrelation in Cloth-Changing Person Re-identification}, author={Hongjun Wang, Jiyuan Chen, Renhe Jiang, Xuan Song, Yinqiang Zheng}, journal={arXiv preprint arXiv:2410.05536}, year={2024}, archivePrefix={arXiv}, eprint={2410.05536}, primaryClass={cs.CV cs.AI cs.IR} }
wang2024on
arxiv-666795
2410.05538
Online Dynamic Pricing for Electric Vehicle Charging Stations with Reservations
<|reference_start|>Online Dynamic Pricing for Electric Vehicle Charging Stations with Reservations: The transition to electric vehicles (EVs), coupled with the rise of renewable energy sources, will significantly impact the electric grid. Unlike conventional fuel sources, electricity for EVs is constrained by grid capacity, price fluctuations, and long EV charging times, requiring new pricing solutions to manage demand and supply. This paper proposes a model for online dynamic pricing of reserved EV charging services, including reservation, parking, and charging as a bundled service priced as a whole. Our approach focuses on the individual charging station operator, employing a stochastic demand model and online dynamic pricing based on expected demand. The proposed model uses a Markov Decision Process (MDP) formulation to optimize sequential pricing decisions for charging session requests. A key contribution is the novel definition and quantification of discretization error introduced by the discretization of the Poisson process for use in the MDP. The model's viability is demonstrated with a heuristic solution method based on Monte-Carlo tree search, offering a viable path for real-world application.<|reference_end|>
arxiv
@article{mrkos2024online, title={Online Dynamic Pricing for Electric Vehicle Charging Stations with Reservations}, author={Jan Mrkos, Anton\'in Komenda, David Fiedler, and Ji\v{r}\'i Vok\v{r}\'inek}, journal={arXiv preprint arXiv:2410.05538}, year={2024}, archivePrefix={arXiv}, eprint={2410.05538}, primaryClass={cs.MA cs.AI} }
mrkos2024online
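The abstract above highlights the error introduced by discretizing a Poisson request process for use in the pricing MDP. As a hedged illustration only (not the paper's quantification of that error), the snippet below computes, for a given request rate and decision-slot length, the probability mass on multiple arrivals per slot, i.e., the mass a per-slot single-arrival approximation ignores; the rate and slot length are made-up example values.

```python
import math

def slot_arrival_probs(rate: float, slot_len: float) -> tuple[float, float, float]:
    """Probabilities of 0, 1, and >=2 arrivals of a Poisson(rate) process in one slot.

    The >=2 term is the mass typically dropped when a continuous-time Poisson
    demand process is discretized into per-slot arrivals for an MDP.
    """
    mean = rate * slot_len
    p0 = math.exp(-mean)
    p1 = mean * math.exp(-mean)
    return p0, p1, 1.0 - p0 - p1

# Example: 4 requests/hour, 5-minute decision slots (illustrative numbers).
p0, p1, p_multi = slot_arrival_probs(rate=4.0, slot_len=5 / 60)
print(f"P(0)={p0:.4f}  P(1)={p1:.4f}  P(>=2)={p_multi:.5f}")
```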
arxiv-666796
2410.05540
Game of Coding: Sybil Resistant Decentralized Machine Learning with Minimal Trust Assumption
<|reference_start|>Game of Coding: Sybil Resistant Decentralized Machine Learning with Minimal Trust Assumption: Coding theory plays a crucial role in ensuring data integrity and reliability across various domains, from communication to computation and storage systems. However, its reliance on trust assumptions for data recovery poses significant challenges, particularly in emerging decentralized systems where trust is scarce. To address this, the game of coding framework was introduced, offering insights into strategies for data recovery within incentive-oriented environments. The focus of the earliest version of the game of coding was limited to scenarios involving only two nodes. This paper investigates the implications of increasing the number of nodes in the game of coding framework, particularly focusing on scenarios with one honest node and multiple adversarial nodes. We demonstrate that despite the increased flexibility for the adversary with an increasing number of adversarial nodes, having more power is not beneficial for the adversary and is not detrimental to the data collector, making this scheme sybil-resistant. Furthermore, we outline optimal strategies for the data collector in terms of accepting or rejecting the inputs, and characterize the optimal noise distribution for the adversary.<|reference_end|>
arxiv
@article{nodehi2024game, title={Game of Coding: Sybil Resistant Decentralized Machine Learning with Minimal Trust Assumption}, author={Hanzaleh Akbari Nodehi, Viveck R. Cadambe, Mohammad Ali Maddah-Ali}, journal={arXiv preprint arXiv:2410.05540}, year={2024}, archivePrefix={arXiv}, eprint={2410.05540}, primaryClass={cs.IT cs.LG math.IT} }
nodehi2024game
arxiv-666797
2410.05545
Aiding Global Convergence in Federated Learning via Local Perturbation and Mutual Similarity Information
<|reference_start|>Aiding Global Convergence in Federated Learning via Local Perturbation and Mutual Similarity Information: Federated learning has emerged in the last decade as a distributed optimization paradigm due to the rapidly increasing number of portable devices able to support the heavy computational needs related to the training of machine learning models. Federated learning utilizes gradient-based optimization to minimize a loss objective shared across participating agents. To the best of our knowledge, the literature mostly lacks elegant solutions that naturally harness the reciprocal statistical similarity between clients to redesign the optimization procedure. To address this gap, by conceiving the federated network as a similarity graph, we propose a novel modified framework wherein each client locally performs a perturbed gradient step leveraging prior information about other statistically affine clients. We theoretically prove that our procedure, due to a suitably introduced adaptation in the update rule, achieves a quantifiable speedup concerning the exponential contraction factor in the strongly convex case compared with popular algorithms FedAvg and FedProx, here analyzed as baselines. Lastly, we legitimize our conclusions through experimental results on the CIFAR10 and FEMNIST datasets, where we show that our algorithm speeds convergence up to a margin of 30 global rounds compared with FedAvg while modestly improving generalization on unseen data in heterogeneous settings.<|reference_end|>
arxiv
@article{buttaci2024aiding, title={Aiding Global Convergence in Federated Learning via Local Perturbation and Mutual Similarity Information}, author={Emanuel Buttaci, Giuseppe Carlo Calafiore}, journal={arXiv preprint arXiv:2410.05545}, year={2024}, archivePrefix={arXiv}, eprint={2410.05545}, primaryClass={cs.LG math.OC} }
buttaci2024aiding
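The abstract above describes clients performing locally perturbed gradient steps that exploit prior information about statistically similar clients. Purely as a sketch under assumptions (the mixing rule, weights, and names below are hypothetical and not the paper's update rule), one way such a step could look:

```python
import numpy as np

def perturbed_local_step(w, grad_fn, last_grads, similarity, lr=0.1, alpha=0.5):
    """One hypothetical similarity-perturbed local step for a single client.

    w          : current local model parameters (np.ndarray)
    grad_fn    : callable returning this client's local gradient at w
    last_grads : dict client_id -> last reported gradient of that client
    similarity : dict client_id -> similarity weight w.r.t. this client (sums to 1)
    alpha      : mixing weight between the local gradient and the neighbor term
    """
    g_local = grad_fn(w)
    g_neighbors = sum(similarity[j] * last_grads[j] for j in similarity)
    direction = (1 - alpha) * g_local + alpha * g_neighbors
    return w - lr * direction
```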
arxiv-666798
2410.05547
Understanding and Imitating Human-Robot Motion with Restricted Visual Fields
<|reference_start|>Understanding and Imitating Human-Robot Motion with Restricted Visual Fields: When working around humans, it is important to model their perception limitations in order to predict their behavior more accurately. In this work, we consider agents with a limited field of view, viewing range, and ability to miss objects within viewing range (e.g., transparency). By treating the observation model independently from the motion policy, we can better predict the agent's behavior by accounting for and approximating these limitations. We perform a user study where human operators navigate a cluttered scene while scanning the region for obstacles with a limited field of view and range. Using imitation learning, we show that a robot can adopt a human's strategy for observing an environment with limitations on observation and navigate with minimal collision with dynamic and static obstacles. We also show that this learned model helps it successfully navigate a physical hardware vehicle in real time.<|reference_end|>
arxiv
@article{bhatt2024understanding, title={Understanding and Imitating Human-Robot Motion with Restricted Visual Fields}, author={Maulik Bhatt, HongHao Zhen, Monroe Kennedy III, Negar Mehr}, journal={arXiv preprint arXiv:2410.05547}, year={2024}, archivePrefix={arXiv}, eprint={2410.05547}, primaryClass={cs.RO} }
bhatt2024understanding
arxiv-666799
2410.05550
Aggregating Quantitative Relative Judgments: From Social Choice to Ranking Prediction
<|reference_start|>Aggregating Quantitative Relative Judgments: From Social Choice to Ranking Prediction: Quantitative Relative Judgment Aggregation (QRJA) is a new research topic in (computational) social choice. In the QRJA model, agents provide judgments on the relative quality of different candidates, and the goal is to aggregate these judgments across all agents. In this work, our main conceptual contribution is to explore the interplay between QRJA in a social choice context and its application to ranking prediction. We observe that in QRJA, judges do not have to be people with subjective opinions; for example, a race can be viewed as a "judgment" on the contestants' relative abilities. This allows us to aggregate results from multiple races to evaluate the contestants' true qualities. At a technical level, we introduce new aggregation rules for QRJA and study their structural and computational properties. We evaluate the proposed methods on data from various real races and show that QRJA-based methods offer effective and interpretable ranking predictions.<|reference_end|>
arxiv
@article{xu2024aggregating, title={Aggregating Quantitative Relative Judgments: From Social Choice to Ranking Prediction}, author={Yixuan Even Xu, Hanrui Zhang, Yu Cheng, Vincent Conitzer}, journal={arXiv preprint arXiv:2410.05550}, year={2024}, archivePrefix={arXiv}, eprint={2410.05550}, primaryClass={cs.GT} }
xu2024aggregating
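The abstract above treats each race as a quantitative judgment on contestants' relative abilities and aggregates many such judgments into a ranking. As a generic, hypothetical illustration (not one of the paper's proposed aggregation rules), a least-squares aggregator over (winner, loser, margin) triples could look as follows:

```python
import numpy as np

def aggregate_relative_judgments(judgments, n_candidates):
    """Least-squares aggregation of quantitative relative judgments (illustrative only).

    judgments    : list of (a, b, margin) triples meaning "a beat b by `margin`"
                   in one judgment (e.g., one race).
    n_candidates : number of candidates being ranked.
    Returns per-candidate scores whose pairwise differences best fit the margins.
    """
    rows, rhs = [], []
    for a, b, margin in judgments:
        row = np.zeros(n_candidates)
        row[a], row[b] = 1.0, -1.0
        rows.append(row)
        rhs.append(margin)
    # Pin the mean score to zero so the system has a unique solution.
    rows.append(np.ones(n_candidates))
    rhs.append(0.0)
    scores, *_ = np.linalg.lstsq(np.array(rows), np.array(rhs), rcond=None)
    return scores

# Example: three races over candidates 0..2 (made-up margins).
scores = aggregate_relative_judgments([(0, 1, 2.0), (1, 2, 1.0), (0, 2, 2.5)], 3)
print(np.argsort(-scores))  # predicted ranking, best first
```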
arxiv-666800
2410.05551
Misère Connect Four is Solved
<|reference_start|>Mis\`ere Connect Four is Solved: Connect Four is a two-player game where each player attempts to be the first to create a sequence of four of their pieces, arranged horizontally, vertically, or diagonally, by dropping pieces into the columns of a grid of width seven and height six, in alternating turns. Mis\`ere Connect Four is played by the same rules, but with the opposite objective: do not connect four. This paper announces that Mis\`ere Connect Four is solved: perfect play by both sides leads to a second-player win. More generally, this paper also announces that Mis\`ere Connect $k$ played on a $w \times h$ board is also solved, but the outcome depends on the game's parameters $k$, $w$, and $h$, and may be a first-player win, a second-player win, or a draw. These results are constructive, meaning that we provide explicit strategies, thus enabling readers to impress their friends and foes alike with provably optimal play in the mis\`ere form of a table-top game for children.<|reference_end|>
arxiv
@article{steele2024misere, title={Mis\`ere Connect Four is Solved}, author={Robert Steele and Daniel B. Larremore}, journal={arXiv preprint arXiv:2410.05551}, year={2024}, archivePrefix={arXiv}, eprint={2410.05551}, primaryClass={math.CO cs.GT} }
steele2024misere