@inproceedings{foures:hal-04216627,
  title = {Experience in Specializing a Generic Realization Language for SPL Engineering at Airbus},
  author = {Foures, Damien and Acher, Mathieu and Barais, Olivier and Combemale, Benoit and J{\'e}z{\'e}quel, Jean-Marc and Kienzle, J{\"o}rg},
  url = {https://inria.hal.science/hal-04216627},
  booktitle = {{MODELS 2023 - 26th International Conference on Model-Driven Engineering Languages and Systems}},
  address = {V{\"a}ster{\aa}s, Sweden},
  organization = {{ACM and IEEE}},
  publisher = {{IEEE}},
  pages = {1-12},
  year = {2023},
  month = oct,
  pdf = {https://inria.hal.science/hal-04216627/file/Experience_in_Specializing_a_Generic_Realization_Language_for_SPL_Engineering_at_Airbus%20%285%29.pdf},
  hal_id = {hal-04216627},
  hal_version = {v1},
  note = {Best Paper Award},
  month_numeric = {10}
}
@article{lesoil:hal-03476464,
  title = {Input Sensitivity on the Performance of Configurable Systems: An Empirical Study},
  author = {Lesoil, Luc and Acher, Mathieu and Blouin, Arnaud and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://inria.hal.science/hal-03476464},
  journal = {{Journal of Systems and Software}},
  publisher = {{Elsevier}},
  pages = {1-18},
  year = {2023},
  doi = {10.1016/j.jss.2023.111671},
  keywords = {Input Sensitivity ; Software variability ; Performance prediction},
  pdf = {https://inria.hal.science/hal-03476464v2/file/main.pdf},
  hal_id = {hal-03476464},
  hal_version = {v2}
}
@inproceedings{acher:hal-04153310,
  title = {On Programming Variability with Large Language Model-based Assistant},
  author = {Acher, Mathieu and Duarte, Jos{\'e} Galindo and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://inria.hal.science/hal-04153310},
  booktitle = {{SPLC 2023 - 27th ACM International Systems and Software Product Lines Conference}},
  address = {Tokyo, Japan},
  organization = {{ACM}},
  publisher = {{ACM}},
  pages = {1-7},
  year = {2023},
  month = aug,
  slides = {https://www.slideshare.net/acher/on-programming-variability-with-large-language-modelbased-assistant},
  keywords = {variability ; programming ; software product lines ; generative AI ; large language model ; variants},
  pdf = {https://inria.hal.science/hal-04153310/file/ProgrammingVariabilityGPT-SPLC23.pdf},
  hal_id = {hal-04153310},
  hal_version = {v1},
  month_numeric = {8}
}
@inproceedings{acher:hal-04160693,
  title = {Generative AI for Reengineering Variants into Software Product Lines: An Experience Report},
  author = {Acher, Mathieu and Martinez, Jabier},
  url = {https://inria.hal.science/hal-04160693},
  booktitle = {{SPLC 2023 - 27th ACM International Systems and Software Product Lines Conference}},
  address = {Tokyo, Japan},
  publisher = {{ACM}},
  series = {B},
  volume = {B},
  pages = {1-9},
  year = {2023},
  month = aug,
  doi = {10.1145/3579028.3609016},
  keywords = {generative AI ; software product line ; variability ; configuration ; variant ; large language model ; reverse engineering},
  pdf = {https://inria.hal.science/hal-04160693/file/FromVariants2SPLWithLLM%20%282%29.pdf},
  slides = {https://www.slideshare.net/acher/generative-ai-for-reengineering-variants-into-software-product-lines-an-experience-report},
  hal_id = {hal-04160693},
  hal_version = {v1},
  month_numeric = {8}
}
@inproceedings{kebaili:hal-04126496,
  title = {Towards Leveraging Tests to Identify Impacts of Metamodel and Code Co-evolution},
  author = {Kebaili, Zohra Kaouter and Khelladi, Djamel Eddine and Acher, Mathieu and Barais, Olivier},
  url = {https://inria.hal.science/hal-04126496},
  booktitle = {{CAiSE 2023 - 35th International Conference on Advanced Information Systems Engineering}},
  address = {Zaragoza, Spain},
  publisher = {{Springer International Publishing}},
  series = {Lecture Notes in Business Information Processing},
  volume = {477},
  pages = {129-137},
  year = {2023},
  month = jun,
  doi = {10.1007/978-3-031-34674-3\_16},
  keywords = {Model evolution ; Code co-evolution ; Unit tests ; Testing co-evolution},
  pdf = {https://inria.hal.science/hal-04126496/file/Towards_Leveraging_Tests_to_Identify_Impacts_of_Metamodel_and_Code_Co_evolution_CAISE23_forum.pdf},
  hal_id = {hal-04126496},
  hal_version = {v1},
  month_numeric = {6}
}
@inproceedings{acher:hal-03882594,
  title = {A Call for Removing Variability},
  author = {Acher, Mathieu and Lesoil, Luc and Randrianaina, Georges Aaron and T{\"e}rnava, Xhevahire and Zendra, Olivier},
  url = {https://hal.science/hal-03882594},
  booktitle = {{VaMoS 2023 - 17th International Working Conference on Variability Modelling of Software-Intensive Systems}},
  address = {Odense, Denmark},
  pages = {3},
  year = {2023},
  month = jan,
  doi = {10.1145/3571788.3571801},
  keywords = {software variability ; removing variability ; software bloat},
  pdf = {https://hal.science/hal-03882594/file/CR-HAL-paper13.pdf},
  hal_id = {hal-03882594},
  hal_version = {v1},
  month_numeric = {1}
}
@book{LMA2023,
  editor = {Lopez-Herrejon, Roberto and Martinez, Jabier and Ziadi, Tewfik and Acher, Mathieu and Assunção, Wesley K. G. and Vergilio, Silvia},
  title = {Handbook of Re-Engineering Software Intensive Systems into Software Product Lines},
  publisher = {Springer International Publishing},
  year = {2023},
  url = {https://doi.org/10.1007/978-3-031-11686-5},
  doi = {10.1007/978-3-031-11686-5},
  isbn = {978-3-031-11685-8},
  timestamp = {Fri, 25 Nov 2022 15:45:20 +0100},
  biburl = {https://dblp.org/rec/books/sp/23/LMA2023.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  important = {1},
  twitter_id = {https://twitter.com/wesleyklewerton/status/1597174694259888128},
  abstract = { This handbook distils the wealth of expertise and knowledge from a large community of researchers and industrial practitioners in Software Product Lines (SPLs) gained through extensive and rigorous theoretical, empirical, and applied research. It is a timely compilation of well-established and cutting-edge approaches that can be leveraged by those facing the prevailing and daunting challenge of re-engineering their systems into SPLs. The selection of chapters provides readers with a wide and diverse perspective that reflects the complementary and varied expertise of the chapter authors. This perspective covers the re-engineering processes, from planning to execution. SPLs are families of systems that share common assets, allowing disciplined software reuse. The adoption of SPL practices has been shown to enable significant technical and economic benefits for the companies that employ them. However, successful SPLs rarely start from scratch; instead, they usually start from a set of existing systems that must undergo well-defined re-engineering processes to unleash new levels of productivity and competitiveness. Practitioners will benefit from the lessons learned by the community, captured in the array of methodological and technological alternatives presented in the chapters of the handbook, and will gain the confidence for undertaking their own re-engineering challenges. Researchers and educators will find a valuable single-entry point to quickly become familiar with the state of the art on the topic and the open research opportunities; this includes undergraduate and graduate students as well as R&D engineers who want to have a comprehensive understanding of techniques in reverse engineering and re-engineering of variability-rich software systems. }
}
@article{acher:hal-03897639,
  title = {BURST: Benchmarking Uniform Random Sampling Techniques},
  author = {Acher, Mathieu and Perrouin, Gilles and Cordy, Maxime},
  url = {https://hal.inria.fr/hal-03897639},
  journal = {{Science of Computer Programming}},
  publisher = {{Elsevier}},
  year = {2023},
  month = jan,
  keywords = {configurable systems ; software product lines ; variability model ; sampling ; SAT ; benchmark ; feature model},
  pdf = {https://hal.inria.fr/hal-03897639/file/BURST_SCPJournalOSP-CR.pdf},
  hal_id = {hal-03897639},
  hal_version = {v1},
  abstract = { BURST is a benchmarking platform for uniform random sampling (URS) techniques. Given: i) the description of a sampling space provided as a Boolean formula (DIMACS), and ii) a sampling budget (time and strength of uniformity), BURST evaluates ten samplers for scalability and uniformity. BURST measures scalability based on the time required to produce a sample, and uniformity based on the state-of-the-art and proven statistical test Barbarik. BURST is easily extendable to new samplers and offers: i) 128 feature models (for highly-configurable systems), ii) many other models mined from the artificial intelligence/satisfiability solving benchmarks. BURST envisions supporting URS assessment and design across multiple research communities. },
  month_numeric = {1}
}
@inbook{DBLP:books/sp/23/0003TAPJ23,
  author = {Martin, Hugo and Temple, Paul and Acher, Mathieu and Pereira, Juliana Alves and J{\'e}z{\'e}quel, Jean-Marc},
  chapter = {Machine Learning for Feature Constraints Discovery},
  title = {Handbook of Re-Engineering Software Intensive Systems into Software Product Lines},
  pages = {175--196},
  publisher = {Springer International Publishing},
  year = {2023},
  url = {https://doi.org/10.1007/978-3-031-11686-5\_7},
  doi = {10.1007/978-3-031-11686-5\_7},
  timestamp = {Fri, 25 Nov 2022 15:54:20 +0100},
  biburl = {https://dblp.org/rec/books/sp/23/0003TAPJ23.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  abstract = { Constraints among features are central to the success and quality of software product lines (SPLs). Unfortunately, the number of potential interactions and dependencies, materialized as logical constraints, grows as the number of features increases in an SPL. In particular, it is easy to forget a constraint and thus mistakenly authorize invalid products. Developers thus struggle to identify and track constraints throughout the engineering of more and more complex SPLs. In this chapter, we show how to leverage statistical machine learning (and more specifically decision trees) to automatically prevent the derivation of invalid products through the synthesis of constraints. The key principle is to try and test some products of an SPL and then identify which individual features or combinations of features (if any) cause their non-validity (e.g., a product does not compile). A sample of derived products is used to train a classifier (here a decision tree, but other classifiers might also be used as long as constraints can be easily extracted) that can classify any remaining products of the SPL. We illustrate the chapter through different application domains and software systems (a video generator, parametric programs for 3D printing, or the Linux kernel). We also discuss the cost, benefits, and applicability of the method. }
}
@article{DBLP:journals/corr/abs-2210-14082,
  author = {T{\"e}rnava, Xhevahire and Acher, Mathieu and Combemale, Beno{\^{\i}}t},
  title = {Specialization of Run-time Configuration Space at Compile-time: An Exploratory Study},
  journal = {CoRR},
  volume = {abs/2210.14082},
  year = {2022},
  url = {https://doi.org/10.48550/arXiv.2210.14082},
  doi = {10.48550/arXiv.2210.14082},
  eprinttype = {arXiv},
  eprint = {2210.14082},
  timestamp = {Mon, 31 Oct 2022 12:04:42 +0100},
  biburl = {https://dblp.org/rec/journals/corr/abs-2210-14082.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  abstract = { Numerous software systems are highly configurable through run-time options, such as command-line parameters. Users can tune some of the options to meet various functional and non-functional requirements such as footprint, security, or execution time. However, some options are never set for a given system instance, and their values remain the same whatever the use cases of the system. Herein, we design a controlled experiment in which the system's run-time configuration space can be specialized at compile-time and combinations of options can be removed on demand. We perform an in-depth study of the well-known x264 video encoder and quantify the effects of its specialization on its non-functional properties, namely binary size, attack surface, and performance, while ensuring its validity. Our exploratory study suggests that the configurable specialization of a system has statistically significant benefits on most of its analysed non-functional properties, and these benefits depend on the number of debloated options. While our empirical results and insights show the importance of removing code related to unused run-time options to improve software systems, an open challenge is to further automate the specialization process. }
}
@article{DBLP:journals/corr/abs-2210-14699,
  author = {D{\"{o}}derlein, Jean{-}Baptiste and Acher, Mathieu and Khelladi, Djamel Eddine and Combemale, Beno{\^{\i}}t},
  title = {Piloting Copilot and Codex: Hot Temperature, Cold Prompts, or Black Magic?},
  journal = {CoRR},
  volume = {abs/2210.14699},
  year = {2022},
  url = {https://doi.org/10.48550/arXiv.2210.14699},
  doi = {10.48550/arXiv.2210.14699},
  eprinttype = {arXiv},
  eprint = {2210.14699},
  timestamp = {Wed, 02 Nov 2022 14:44:47 +0100},
  biburl = {https://dblp.org/rec/journals/corr/abs-2210-14699.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  abstract = { Language models are promising solutions for tackling increasingly complex problems. In software engineering, they recently attracted attention in code assistants, with programs automatically written in a given programming language from a programming task description in natural language. They have the potential to save time and effort when writing code. However, these systems are currently poorly understood, preventing them from being used optimally. In this paper, we investigate the various input parameters of two language models, and conduct a study to understand if variations of these input parameters (e.g. programming task description and the surrounding context, creativity of the language model, number of generated solutions) can have a significant impact on the quality of the generated programs. We design specific operators for varying input parameters and apply them over two code assistants (Copilot and Codex) and two benchmarks representing algorithmic problems (HumanEval and LeetCode). Our results showed that varying the input parameters can significantly improve the performance of language models. However, there is a tight dependency when varying the temperature, the prompt and the number of generated solutions, making it potentially hard for developers to properly control the parameters to obtain an optimal result. This work opens opportunities to propose (automated) strategies for improving performance. }
}
@inproceedings{acher:hal-03720273,
  title = {Feature Subset Selection for Learning Huge Configuration Spaces: The case of Linux Kernel Size},
  author = {Acher, Mathieu and Martin, Hugo and Pereira, Juliana Alves and Lesoil, Luc and Blouin, Arnaud and J{\'e}z{\'e}quel, Jean-Marc and Khelladi, Djamel Eddine and Barais, Olivier},
  url = {https://hal.inria.fr/hal-03720273},
  booktitle = {{SPLC 2022 - 26th ACM International Systems and Software Product Line Conference}},
  address = {Graz, Austria},
  pages = {1-12},
  year = {2022},
  month = sep,
  doi = {10.1145/3546932.3546997},
  pdf = {https://hal.inria.fr/hal-03720273/file/SPLC_2022___Linux_Kernel_Size.pdf},
  hal_id = {hal-03720273},
  hal_version = {v1},
  slides = {https://www.slideshare.net/acher/feature-subset-selection-for-learning-huge-configuration-spaces-the-case-of-linux-kernel-size},
  abstract = { Linux kernels are used in a wide variety of appliances, many of them having strong requirements on the kernel size due to constraints such as limited memory or instant boot. With more than nine thousand configuration options to choose from, developers and users of Linux actually spend significant effort to document, understand, and eventually tune (combinations of) options for meeting a kernel size. In this paper, we describe a large-scale endeavour automating this task and predicting a given Linux kernel binary size out of unmeasured configurations. We first show experimentally that state-of-the-art solutions specifically made for configurable systems such as performance-influence models cannot cope with that number of options, suggesting that software product line techniques may need to be adapted to such huge configuration spaces. We then show that tree-based feature selection can learn a model achieving low prediction errors over a reduced set of options. The resulting model, trained on 95 854 kernel configurations, is fast to compute, simple to interpret and even outperforms the accuracy of learning without feature selection. },
  month_numeric = {9}
}
@inproceedings{jezequel:hal-03788437,
  title = {From feature models to feature toggles in practice},
  author = {J{\'e}z{\'e}quel, Jean-Marc and Kienzle, J{\"o}rg and Acher, Mathieu},
  url = {https://hal.inria.fr/hal-03788437},
  booktitle = {{SPLC 2022 - 26th ACM International Systems and Software Product Line Conference}},
  address = {Graz / Hybrid, Austria},
  publisher = {{ACM}},
  pages = {234-244},
  year = {2022},
  month = sep,
  doi = {10.1145/3546932.3547009},
  keywords = {Configuration ; Feature toggles and flags ; Binding times ; Variability},
  pdf = {https://hal.inria.fr/hal-03788437/file/Unifying_SPL_and_Feature_Flags%282%29.pdf},
  hal_id = {hal-03788437},
  hal_version = {v1},
  abstract = { Feature Toggles (often also referred to as Feature Flags) are a powerful technique, providing an alternative to maintaining multiple feature branches in source code. A condition within the code enables or disables a feature at runtime, hence providing a kind of runtime variability resolution. Several works have already identified the proximity of this concept with the notion of Feature found in Software Product Lines. In this paper, we propose to go one step further in unifying these concepts to provide a seamless transition between design time and runtime variability resolutions. We propose to model all the variability using a feature model. Then this feature model can be partially resolved at design time (yielding an incomplete product derivation), the unresolved variability being used to generate feature toggles that can be enabled/disabled at runtime. We first demonstrate these ideas on the toy example of the Expression Product Line, and then show how it can scale to build a configurable authentication system, where a partially resolved feature model can interface with popular feature toggle frameworks such as Togglz. },
  month_numeric = {9}
}
@inproceedings{ternava:hal-03627246,
  title = {Scratching the Surface of ./configure: Learning the Effects of Compile-Time Options on Binary Size and Gadgets},
  author = {T{\"e}rnava, Xhevahire and Acher, Mathieu and Lesoil, Luc and Blouin, Arnaud and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://hal.archives-ouvertes.fr/hal-03627246},
  booktitle = {{ICSR 2022 - 20th International Conference on Software and Systems Reuse}},
  address = {Montpellier, France},
  pages = {1-18},
  year = {2022},
  month = jun,
  keywords = {Configurable systems ; compile-time variability ; binary size ; gadgets ; system security ; non-functional properties ; statistical learning},
  pdf = {https://hal.archives-ouvertes.fr/hal-03627246/file/paper32-ICSR2022_v03.pdf},
  hal_id = {hal-03627246},
  hal_version = {v1},
  note = {Best Paper Award},
  important = {1},
  abstract = { Numerous software systems are configurable through compile-time options and the widely used ./configure. However, the combined effects of these options on a binary's non-functional properties (size and attack surface) are often not documented and/or not well understood, even by experts. Our goal is to provide automated support for exploring and comprehending the configuration space (a.k.a. surface) of compile-time options using statistical learning techniques. In this paper, we perform an empirical study on four C-based configurable systems. We measure the variation of binary size and attack surface (by quantifying the number of code reuse gadgets) in over 400 compile-time configurations of a subject system. We then apply statistical learning techniques on top of our build infrastructure to identify how compile-time options relate to non-functional properties. Our results show that, by changing the default configuration, the system's binary size and gadgets vary greatly (roughly −79\% to 244\% and −77\% to 30\%, respectively). Then, we found that the most influential options can be accurately identified with a small training set, while their relative importance varies across size and attack surface for the same system. Practitioners can use our approach and artifacts to explore the effects of compile-time options in order to make informed decisions when configuring a system with ./configure. },
  month_numeric = {6}
}
@inproceedings{lesoil:hal-03624309,
  title = {Beware of the Interactions of Variability Layers When Reasoning about Evolution of MongoDB},
  author = {Lesoil, Luc and Acher, Mathieu and Blouin, Arnaud and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://hal.archives-ouvertes.fr/hal-03624309},
  booktitle = {{ICPE 2022 - 13th ACM/SPEC International Conference on Performance Engineering}},
  address = {Beijing, China},
  pages = {1-5},
  year = {2022},
  month = apr,
  doi = {10.1145/3491204.3527489},
  pdf = {https://hal.archives-ouvertes.fr/hal-03624309/file/ICPE_2022___Data_challenge.pdf},
  hal_id = {hal-03624309},
  hal_version = {v1},
  abstract = { With commits and releases, hundreds of tests are run under varying conditions (e.g., over different hardware and workloads) that can help to understand evolution and ensure non-regression of software performance. We hypothesize that performance is not only sensitive to the evolution of software, but also to different variability layers of its execution environment, spanning the hardware, the operating system, the build, or the workload processed by the software. Leveraging the MongoDB dataset, our results show that changes in hardware and workload can drastically impact performance evolution and thus should be taken into account when reasoning about evolution. An open problem resulting from this study is how to manage the variability layers in order to efficiently test the performance evolution of a software system. },
  month_numeric = {4}
}
@inproceedings{templeEMSEAdvICSA,
  title = {Empirical Assessment of Generating Adversarial Configurations for Software Product Lines},
  author = {Temple, Paul and Perrouin, Gilles and Acher, Mathieu and Biggio, Battista and J{\'e}z{\'e}quel, Jean-Marc and Roli, Fabio},
  booktitle = {{International Conference on Software Architecture (ICSA), Journal First Track}},
  year = {2022},
  month = nov,
  keywords = {software product line ; software variability ; software testing ; machine learning ; quality assurance},
  abstract = { Software product line (SPL) engineers put a lot of effort into ensuring that, through the setting of a large number of possible configuration options, products are acceptable and well-tailored to customers' needs. Unfortunately, options and their mutual interactions create a huge configuration space which is intractable to exhaustively explore. Instead of testing all products, machine learning is increasingly employed to approximate the set of acceptable products out of a small training sample of configurations. Machine learning (ML) techniques can refine a software product line through learned constraints and a priori prevent non-acceptable products from being derived. In this paper, we use adversarial ML techniques to generate adversarial configurations fooling ML classifiers and pinpoint incorrect classifications of products (videos) derived from an industrial video generator. Our attacks yield (up to) a 100\% misclassification rate and a drop in accuracy of 5\%. We discuss the implications these results have on SPL quality assurance. },
  month_numeric = {11}
}
@inproceedings{randrianaina:hal-03547219,
  title = {On the Benefits and Limits of Incremental Build of Software Configurations: An Exploratory Study},
  author = {Randrianaina, Georges Aaron and T{\"e}rnava, Xhevahire and Khelladi, Djamel Eddine and Acher, Mathieu},
  url = {https://hal.archives-ouvertes.fr/hal-03547219},
  booktitle = {{ICSE 2022 - 44th International Conference on Software Engineering}},
  address = {Pittsburgh, Pennsylvania / Virtual, United States},
  pages = {1-12},
  year = {2022},
  month = may,
  keywords = {Configurable ; Variability ; Build Systems ; Exploratory Study},
  pdf = {https://hal.archives-ouvertes.fr/hal-03547219v2/file/ICSE22_Incremental_Build_HAL.pdf},
  hal_id = {hal-03547219},
  hal_version = {v2},
  important = {1},
  abstract = { Software projects use build systems to automate the compilation, testing, and continuous deployment of their software products. As software becomes increasingly configurable, the build of multiple configurations is a pressing need, but expensive and challenging to implement. The current state of practice is to independently build (aka clean build) software for a subset of configurations. While incremental build has been studied for software evolution and relatively small changes of the source code, it has surprisingly not been considered for software configurations. In this exploratory study, we examine the benefits and limits of building software configurations incrementally, rather than always building them cleanly. By using five real-life configurable systems as subjects, we explore whether incremental build works, outperforms a sequence of clean builds, is correct w.r.t. clean build, and can be used to find an optimal ordering for building configurations. Our results show that incremental build is feasible 100\% of the time in four subjects and 78\% of the time in one subject. On average, 88.5\% of the configurations could be built faster with incremental build, while also finding several faster alternative incremental builds. However, only 60\% of faster incremental builds are correct. Still, when considering those correct incremental builds with clean builds, we could always find an optimal order that is faster than just a collection of clean builds, with a gain of up to 11.76\%. },
  month_numeric = {5}
}
@inproceedings{acher:hal-03528889,
  title = {Reproducible Science and Deep Software Variability},
  author = {Acher, Mathieu},
  url = {https://hal.inria.fr/hal-03528889},
  booktitle = {{VaMoS 2022 - 16th International Working Conference on Variability Modelling of Software-Intensive Systems}},
  address = {Florence, Italy},
  pages = {1-2},
  year = {2022},
  month = feb,
  pdf = {https://hal.inria.fr/hal-03528889/file/KeynoteVaMoSReproducibleScienceDeepVariability.pdf},
  hal_id = {hal-03528889},
  hal_version = {v1},
  abstract = { Biology, medicine, physics, astrophysics, chemistry: all these scientific domains need to process large amounts of data with more and more complex software systems. For achieving reproducible science, there are several challenges ahead involving multidisciplinary collaboration and socio-technical innovation with software at the center of the problem. Despite the availability of data and code, several studies report that the same data analyzed with different software can lead to different results. I see this problem as a manifestation of deep software variability: many factors (operating system, third-party libraries, versions, workloads, compile-time options and flags, etc.), themselves subject to variability, can alter the results, up to the point that it can dramatically change the conclusions of some scientific studies. In this keynote, I argue that deep software variability is a threat and also an opportunity for reproducible science. I first outline some works about (deep) software variability, reporting on preliminary evidence of complex interactions between variability layers. I then link the ongoing works on variability modelling and deep software variability in the quest for reproducible science. },
  month_numeric = {2}
}
@inproceedings{lesoil:hal-03514984,
  title = {Transferring Performance between Distinct Configurable Systems: A Case Study},
  author = {Lesoil, Luc and Martin, Hugo and Acher, Mathieu and Blouin, Arnaud and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://hal.inria.fr/hal-03514984},
  booktitle = {{VaMoS 2022 - 16th International Working Conference on Variability Modelling of Software-Intensive Systems}},
  address = {Florence, Italy},
  pages = {1-6},
  year = {2022},
  month = feb,
  doi = {10.1145/3510466.3510486},
  pdf = {https://hal.inria.fr/hal-03514984/file/VaMoS_22_tracsoft.pdf},
  hal_id = {hal-03514984},
  hal_version = {v1},
  abstract = { Many research studies predict the performance of configurable software using machine learning techniques, thus requiring large amounts of data. Transfer learning aims to reduce the amount of data needed to train these models and has been successfully applied across different execution environments (hardware) or software versions. In this paper, we investigate for the first time the idea of applying transfer learning between distinct configurable systems. We design a study involving two video encoders (namely x264 and x265) coming from different code bases. Our results are encouraging since transfer learning outperforms traditional learning for two performance properties (out of three). We discuss the open challenges to overcome for a more general application. },
  month_numeric = {2}
}
@inproceedings{ternava:hal-03527250,
  title = {On the Interaction of Feature Toggles},
  author = {T{\"e}rnava, Xhevahire and Lesoil, Luc and Randrianaina, Georges Aaron and Khelladi, Djamel Eddine and Acher, Mathieu},
  url = {https://hal.archives-ouvertes.fr/hal-03527250},
  booktitle = {{VaMoS 2022 - 16th International Working Conference on Variability Modelling of Software-Intensive Systems}},
  address = {Florence, Italy},
  year = {2022},
  month = feb,
  doi = {10.1145/3510466.3510485},
  keywords = {feature flags ; continuous deployment ; interaction of feature toggles ; feature toggles},
  pdf = {https://hal.archives-ouvertes.fr/hal-03527250v2/file/VaMoS22-paper8-HAL.pdf},
  hal_id = {hal-03527250},
  hal_version = {v2},
  abstract = { Feature toggling is a technique for enabling branching-in-code. It is increasingly used during continuous deployment to incrementally test and integrate new features before their release. In principle, feature toggles tend to be light, that is, they are defined as simple Boolean flags and used in conditional statements to condition the activation of some software features. However, there is a lack of knowledge on whether and how they may interact with each other; in that case, their enabling and testing become complex. We argue that finding the interactions of feature toggles is valuable for developers to know which of them should be enabled at the same time, which are impacted by a removed toggle, and to avoid their mis-configurations. In this work, we mine feature toggles and their interactions in five open-source projects. We then analyse how they are realized and whether they tend to multiply over time. Our results show that 7\% of feature toggles interact with each other, 33\% of them interact with another code expression, and their interactions tend to increase over time (22\%, on average). Further, their interactions are expressed by simple logical operators (i.e., and and or) and nested if statements. We propose to model them into a Feature Toggle Model, and believe that our results are helpful towards robust approaches for managing feature toggles. },
  month_numeric = {2}
}
@inproceedings{randrianaina:hal-03558479,
  title = {Towards Incremental Build of Software Configurations},
  author = {Randrianaina, Georges Aaron and Khelladi, Djamel Eddine and Zendra, Olivier and Acher, Mathieu},
  url = {https://hal.archives-ouvertes.fr/hal-03558479},
  booktitle = {{ICSE-NIER 2022 - 44th International Conference on Software Engineering -- New Ideas and Emerging Results}},
  address = {Pittsburgh, PA, United States},
  pages = {1-5},
  year = {2022},
  month = may,
  doi = {10.1145/3510455.3512792},
  keywords = {Highly configurable system ; Build system ; Incremental build},
  pdf = {https://hal.archives-ouvertes.fr/hal-03558479/file/ICSE22_NIER_HAL.pdf},
  hal_id = {hal-03558479},
  hal_version = {v1},
  important = {1},
  abstract = {{ Building software is a crucial task to compile, test, and deploy software systems while continuously ensuring quality. As software is more and more configurable, building multiple configurations is a pressing need, yet costly and challenging to instrument. The common practice is to independently build (a.k.a., clean build) software for a subset of configurations. While incremental build has been considered for software evolution and relatively small modifications of the source code, it has surprisingly not been considered for software configurations. In this vision paper, we formulate the hypothesis that incremental build can reduce the cost of exploring the configuration space of software systems. We detail how we apply incremental build for two real-world application scenarios and conduct a preliminary evaluation on two case studies, namely x264 and the Linux kernel. For x264, we found that one can incrementally build configurations in an order such that overall build time is reduced. Nevertheless, we could not find any optimal order with the Linux kernel, due to a high distance between random configurations. Therefore, we show it is possible to control the process of generating configurations: we could reuse commonality and gain up to 66\% of build time compared to only clean builds. }},
  month_numeric = {5}
}
@article{DBLP:journals/corr/abs-2112-07279,
  author = {Lesoil, Luc and Acher, Mathieu and Blouin, Arnaud and J{\'{e}}z{\'{e}}quel, Jean{-}Marc},
  title = {The Interaction between Inputs and Configurations fed to Software Systems: an Empirical Study},
  journal = {CoRR},
  volume = {abs/2112.07279},
  year = {2021},
  url = {https://arxiv.org/abs/2112.07279},
  eprinttype = {arXiv},
  eprint = {2112.07279},
  timestamp = {Mon, 03 Jan 2022 15:45:35 +0100},
  biburl = {https://dblp.org/rec/journals/corr/abs-2112-07279.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  abstract = { Widely used software systems such as video encoders are by necessity highly configurable, with hundreds or even thousands of options to choose from. Their users often have a hard time finding suitable values for these options (i.e. finding a proper configuration of the software system) to meet their goals for the tasks at hand, e.g. compress a video down to a certain size. One dimension of the problem is of course that performance depends on the input data: a video as input to an encoder like x264 or a file system fed to a tool like xz. To achieve good performance, users should therefore take into account both dimensions of (1) software variability and (2) input data. In this problem-statement paper, we conduct a large study over 8 configurable systems that quantifies the existing interactions between input data and configurations of software systems. The results exhibit that (1) inputs fed to software systems interact with their configuration options in non-monotonic ways, significantly impacting their performance properties, (2) tuning a software system for its input data makes it possible to multiply its performance by up to ten, and (3) input variability can jeopardize the relevance of performance predictive models for a field deployment. },
  twitter_id = {https://twitter.com/lesoil_l/status/1491599457074528257}
}
@phdthesis{acher:tel-03521806,
  title = {Modelling, Reverse Engineering, and Learning Software Variability},
  author = {Acher, Mathieu},
  url = {https://hal.inria.fr/tel-03521806},
  school = {{Universit{\'e} de Rennes 1}},
  year = {2021},
  month = nov,
  keywords = {software product lines ; variability ; configuration ; learning ; modelling ; reverse engineering},
  type = {Habilitation {\`a} diriger des recherches},
  pdf = {https://hal.inria.fr/tel-03521806/file/HDRAcherVariability.pdf},
  hal_id = {tel-03521806},
  hal_version = {v1},
  sorte = {these},
  month_numeric = {11}
}
@article{martin:hal-03358817,
  title = {Transfer Learning Across Variants and Versions: The Case of Linux Kernel Size},
  author = {Martin, Hugo and Acher, Mathieu and Pereira, Juliana Alves and Lesoil, Luc and J{\'e}z{\'e}quel, Jean-Marc and Khelladi, Djamel Eddine},
  url = {https://hal.inria.fr/hal-03358817},
  journal = {{IEEE Transactions on Software Engineering}},
  publisher = {{Institute of Electrical and Electronics Engineers}},
  volume = {48},
  number = {11},
  pages = {4274--4290},
  year = {2022},
  keywords = {Software Product Line ; Software Evolution ; Machine Learning ; Transfer Learning ; Performance Prediction},
  pdf = {https://hal.inria.fr/hal-03358817/file/TransferLinux__hal.pdf},
  hal_id = {hal-03358817},
  hal_version = {v1},
  important = {1},
  twitter_id = {https://twitter.com/acherm/status/1446399342236078080},
  abstract = { With large scale and complex configurable systems, it is hard for users to choose the right combination of options (i.e., configurations) in order to obtain the wanted trade-off between functionality and performance goals such as speed or size. Machine learning can help in relating these goals to the configurable system options, and thus, predict the effect of options on the outcome, typically after a costly training step. However, many configurable systems evolve at such a rapid pace that it is impractical to retrain a new model from scratch for each new version. In this paper, we propose a new method to enable transfer learning of binary size predictions among versions of the same configurable system. Taking the extreme case of the Linux kernel with its ≈14,500 configuration options, we first investigate how predictions of kernel binary size degrade over successive versions. We show that the direct reuse of an accurate prediction model from 2017 quickly becomes inaccurate when Linux evolves, up to a 32\% mean error by August 2020. We thus propose a new approach, transfer evolution-aware model shifting (TEAMS). It leverages the structure of a configurable system to transfer an initial predictive model towards its future versions with a minimal amount of extra processing for each version. We show that TEAMS vastly outperforms state-of-the-art approaches over the 3-year history of Linux kernels, from 4.13 to 5.8. }
}
@article{alvespereira:hal-02148791,
  title = {Learning Software Configuration Spaces: A Systematic Literature Review},
  author = {Alves Pereira, Juliana and Martin, Hugo and Acher, Mathieu and J{\'e}z{\'e}quel, Jean-Marc and Botterweck, Goetz and Ventresque, Anthony},
  url = {https://hal.inria.fr/hal-02148791},
  journal = {{Journal of Systems and Software}},
  publisher = {{Elsevier}},
  year = {2021},
  month = aug,
  doi = {10.1016/j.jss.2021.111044},
  keywords = {Machine Learning ; Configurable Systems ; Software Product Lines ; Systematic Literature Review},
  pdf = {https://hal.inria.fr/hal-02148791v2/file/Survey_MachineLearningConfiguration_RevisedJSS.pdf},
  hal_id = {hal-02148791},
  hal_version = {v2},
  important = {1},
  twitter_id = {https://twitter.com/acherm/status/1445731364888666117},
  abstract = { Most modern software systems (operating systems like Linux or Android, Web browsers like Firefox or Chrome, video encoders like ffmpeg, x264 or VLC, mobile and cloud applications, etc.) are highly configurable. Hundreds of configuration options, features, or plugins can be combined, each potentially with distinct functionality and effects on execution time, security, energy consumption, etc. Due to the combinatorial explosion and the cost of executing software, it quickly becomes impossible to exhaustively explore the whole configuration space. Hence, numerous works have investigated the idea of learning it from a small sample of configurations' measurements. The pattern ``sampling, measuring, learning'' has emerged in the literature, with several practical interests for both software developers and end-users of configurable systems. In this systematic literature review, we report on the different application objectives (e.g., performance prediction, configuration optimization, constraint mining), use-cases, targeted software systems, and application domains. We review the various strategies employed to gather a representative and cost-effective sample. We describe automated software techniques used to measure functional and non-functional properties of configurations. We classify machine learning algorithms and how they relate to the pursued application. Finally, we also describe how researchers evaluate the quality of the learning process. The findings from this systematic review show that the potential application objective is important; there are a vast number of case studies reported in the literature related to particular domains or software systems. Yet, the huge variant space of configurable systems is still challenging and calls for further investigation of the synergies between artificial intelligence and software engineering. },
  month_numeric = {8}
}
@inproceedings{acherBURST2021,
  author = {Acher, Mathieu and Perrouin, Gilles and Cordy, Maxime},
  title = {BURST: A Benchmarking Platform for Uniform Random Sampling Techniques},
  year = {2021},
  isbn = {9781450384704},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3461002.3473070},
  doi = {10.1145/3461002.3473070},
  abstract = { We present BURST, a benchmarking platform for uniform random sampling techniques. With BURST, researchers have a flexible, controlled environment in which they can evaluate the scalability and uniformity of their sampling. BURST comes with an extensive --- and extensible --- benchmark dataset comprising 128 feature models, including challenging, real-world models of the Linux kernel. BURST takes as inputs a sampling tool, a set of feature models, and a sampling budget. It automatically translates any feature model of the set into DIMACS and invokes the sampling tool to generate the budgeted number of samples. To evaluate the scalability of the sampling tool, BURST measures the time the tool needs to produce the requested sample. To evaluate the uniformity of the produced sample, BURST integrates the state-of-the-art and proven statistical test Barbarik. We envision BURST to become the starting point of a standardisation initiative for sampling tool evaluation. Given the huge research interest in sampling algorithms and tools, this initiative would have the potential to reach and crosscut multiple research communities including AI, ML, SAT and SPL. },
  booktitle = {Proceedings of the 25th ACM International Systems and Software Product Line Conference - Volume B},
  pages = {36–40},
  numpages = {5},
  keywords = {software product lines, variability model, SAT, benchmark, configurable systems, sampling},
  location = {Leicester, United Kingdom},
  series = {SPLC '21},
  youtube_id = {https://www.youtube.com/watch?v=sSKosyrfitA},
  twitter_id = {https://twitter.com/acherm/status/1436360161061392385}
}
@inproceedings{lesoil:hal-03286127,
  title = {The Interplay of Compile-time and Run-time Options for Performance Prediction},
  author = {Lesoil, Luc and Acher, Mathieu and T{\"e}rnava, Xhevahire and Blouin, Arnaud and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://hal.archives-ouvertes.fr/hal-03286127},
  booktitle = {{SPLC 2021 - 25th ACM International Systems and Software Product Line Conference - Volume A}},
  address = {Leicester, United Kingdom},
  publisher = {{ACM}},
  pages = {1-12},
  year = {2021},
  month = sep,
  doi = {10.1145/3461001.3471149},
  pdf = {https://hal.archives-ouvertes.fr/hal-03286127/file/Interplay_compile_runtime.pdf},
  hal_id = {hal-03286127},
  hal_version = {v1},
  twitter_id = {https://twitter.com/acherm/status/1435965581904146439},
  youtube_id = {https://www.youtube.com/watch?v=kVh4VwQUzv8},
  abstract = { Many software projects are configurable through compile-time options (e.g., using ./configure) and also through run-time options (e.g., command-line parameters, fed to the software at execution time). Several works have shown how to predict the effect of run-time options on performance. However, it is yet to be studied how these prediction models behave when the software is built with different compile-time options. For instance, is the best run-time configuration always the best w.r.t. the chosen compilation options? In this paper, we investigate the effect of compile-time options on the performance distributions of 4 software systems. There are cases where the compiler layer effect is linear, which is an opportunity to generalize performance models or to tune and measure runtime performance at lower cost. We also prove there can exist an interplay by exhibiting a case where compile-time options significantly alter the performance distributions of a configurable system. },
  month_numeric = {9}
}
@inproceedings{martin:hal-03335263,
  title = {A comparison of performance specialization learning for configurable systems},
  author = {Martin, Hugo and Acher, Mathieu and Pereira, Juliana Alves and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://hal.archives-ouvertes.fr/hal-03335263},
  booktitle = {{SPLC 2021 - 25th ACM International Systems and Software Product Line Conference}},
  address = {Leicester, United Kingdom},
  publisher = {{ACM}},
  pages = {1-11},
  year = {2021},
  month = sep,
  doi = {10.1145/3461001.3471155},
  pdf = {https://hal.archives-ouvertes.fr/hal-03335263/file/PerformanceSpecialization.pdf},
  hal_id = {hal-03335263},
  hal_version = {v1},
  important = {1},
  twitter_id = {https://twitter.com/acherm/status/1435611528154189828},
  youtube_id = {https://www.youtube.com/watch?v=DC6GeqISf6E},
  note = {Best Paper Award},
  abstract = { The specialization of the configuration space of a software system has been considered for targeting specific configuration profiles, usages, deployment scenarios, or hardware settings. The challenge is to find constraints among options' values that only retain configurations meeting a performance objective. Since the exponential nature of configurable systems makes manual specialization impractical, several approaches have considered its automation using machine learning, i.e., measuring a sample of configurations and then learning what options' values should be constrained. Even focusing on learning techniques based on decision trees for their built-in explainability, there is still a wide range of possible approaches that need to be evaluated, i.e., how accurate is the specialization with regard to sampling size, performance thresholds, and kinds of configurable systems. In this paper, we compare six learning techniques: three variants of decision trees (including a novel algorithm) with and without the use of model-based feature selection. We first perform a study on 8 configurable systems considered in previous related works and show that the accuracy reaches more than 90\% and that feature selection can improve the results in the majority of cases. We then perform a study on the Linux kernel and show that these techniques perform as well as on the other systems. Overall, our results show that there is no one-size-fits-all learning variant (though high accuracy can be achieved): we present guidelines and discuss tradeoffs. },
  month_numeric = {9}
}
@inproceedings{lesoil:hal-03084276,
  title = {Deep Software Variability: Towards Handling Cross-Layer Configuration},
  author = {Lesoil, Luc and Acher, Mathieu and Blouin, Arnaud and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://hal.inria.fr/hal-03084276},
  booktitle = {{VaMoS 2021 - 15th International Working Conference on Variability Modelling of Software-Intensive Systems}},
  address = {Krems / Virtual, Austria},
  year = {2021},
  month = feb,
  pdf = {https://hal.inria.fr/hal-03084276v2/file/vision_paper_deep_software_variability.pdf},
  hal_id = {hal-03084276},
  hal_version = {v2},
  important = {1},
  abstract = { Configuring software is a powerful means to reach functional and performance goals of a system. However, many layers (hardware, operating system, input data, etc.), themselves subject to variability, can alter the performance of software configurations. For instance, configuration options of the x264 video encoder may have very different effects on x264's encoding time when used with different input videos, depending on the hardware on which it is executed. In this vision paper, we coin the term deep software variability to refer to the interaction of all external layers modifying the behavior or non-functional properties of a software system. Deep software variability challenges practitioners and researchers: the combinatorial explosion of possible execution environments complicates the understanding, the configuration, the maintenance, the debugging, and the testing of configurable systems. There are also opportunities: harnessing all variability layers (and not only the software layer) can lead to more efficient systems and configuration knowledge that truly generalizes to any usage and context. },
  twitter_id = {https://twitter.com/lesoil_l/status/1489062896977989635},
  month_numeric = {2}
}
@proceedings{DBLP:conf/vamos/2020,
  editor = {Cordy, Maxime and Acher, Mathieu and Beuche, Danilo and Saake, Gunter},
  title = {VaMoS '20: 14th International Working Conference on Variability Modelling of Software-Intensive Systems, Magdeburg, Germany, February 5-7, 2020},
  publisher = {{ACM}},
  year = {2020},
  url = {https://doi.org/10.1145/3377024},
  doi = {10.1145/3377024},
  isbn = {978-1-4503-7501-6},
  timestamp = {Sat, 08 Feb 2020 14:49:22 +0100},
  biburl = {https://dblp.org/rec/conf/vamos/2020.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:conf/icse/KhelladiCABJ20,
  author = {Khelladi, Djamel Eddine and Combemale, Benoit and Acher, Mathieu and Barais, Olivier and J{\'e}z{\'e}quel, Jean-Marc},
  title = {Co-evolving code with evolving metamodels},
  booktitle = {{ICSE} '20: 42nd International Conference on Software Engineering, Seoul, South Korea, 27 June - 19 July, 2020},
  pages = {1496--1508},
  publisher = {{ACM}},
  year = {2020},
  url = {https://doi.org/10.1145/3377811.3380324},
  doi = {10.1145/3377811.3380324},
  important = {1},
  hal_id = {hal-03029429},
  youtube_id = {https://www.youtube.com/watch?v=zqiG-tEDJKk},
  timestamp = {Fri, 16 Oct 2020 10:56:00 +0200},
  biburl = {https://dblp.org/rec/conf/icse/KhelladiCABJ20.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  abstract = { Metamodels play a significant role in describing and analyzing the relations between domain concepts. They are also a cornerstone for building a software language (SL) for a domain and its associated tooling. The metamodel definition generally drives code generation of a core API. The latter is further enriched by developers with additional code implementing advanced functionalities, e.g., checkers, recommenders, etc. When an SL is evolved to the next version, the metamodels are evolved as well before regenerating the core API code. As a result, the code developers added, both in the core API and in the SL tooling, may be impacted and thus may need to be co-evolved accordingly. Many approaches support the co-evolution of various artifacts when metamodels evolve, but not the co-evolution of code. This paper fills this gap. We propose a semi-automatic co-evolution approach based on change propagation. The premise is that knowledge of the metamodel evolution changes can be propagated by means of resolutions to drive the code co-evolution. Our approach leverages the abstraction level of metamodels, where a given metamodel element often has different usages in the code. It supports alternative co-evolutions to meet different developers' needs. Our work is evaluated on three Eclipse SL implementations, namely OCL, Modisco, and Papyrus, over several evolved versions of metamodels and code. In response to five different evolved metamodels, we co-evolved 976 impacts over 18 projects. A comparison of our co-evolved code with the versioned ones shows the usefulness of our approach. Our approach was able to reach a weighted average precision and recall of 87.4\% and 88.9\%, respectively, while supporting useful alternative co-evolutions that developers have manually performed. }
}
@inproceedings{DBLP:conf/icse/KhelladiCAB20,
  author = {Khelladi, Djamel Eddine and Combemale, Benoit and Acher, Mathieu and Barais, Olivier},
  title = {On the power of abstraction: a model-driven co-evolution approach of software code},
  booktitle = {{ICSE-NIER} 2020: 42nd International Conference on Software Engineering, New Ideas and Emerging Results, Seoul, South Korea, 27 June - 19 July, 2020},
  pages = {85--88},
  publisher = {{ACM}},
  year = {2020},
  url = {https://doi.org/10.1145/3377816.3381727},
  doi = {10.1145/3377816.3381727},
  youtube_id = {https://www.youtube.com/watch?v=aTesF4Oazvs},
  timestamp = {Mon, 28 Sep 2020 12:10:55 +0200},
  biburl = {https://dblp.org/rec/conf/icse/KhelladiCAB20.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  hal_id = {hal-03029426},
  abstract = {Model-driven software engineering fosters abstraction through the use of models and then automation by transforming them into various artefacts, in particular to code, for example: 1) from architectural models to code, 2) from metamodels to API code (with EMF in Eclipse), 3) from entity models to front-end and back-end code in a Web stack application (with JHipster), etc. In all these examples, the generated code is usually enriched by developers with additional code implementing advanced functionalities (e.g., checkers, recommenders, etc.) to build a full coherent system. When the system must evolve, so must the models, in order to regenerate the code. As a result, the developers' enriched code may be impacted and thus needs to co-evolve accordingly. Many approaches support the co-evolution of various artifacts, but not the co-evolution of code. This paper sheds light on this issue and envisions filling this gap. We formulate the hypothesis that the code co-evolution can be driven by the model changes by means of change propagation. To investigate this hypothesis, we implemented a prototype for the case of metamodels and their accompanying code in EMF Eclipse. As a preliminary evaluation, we considered the case of the OCL Pivot metamodel evolution and its code co-evolution in two projects from version 3.2.2 to 3.4.4. Preliminary results confirm our hypothesis that model-driven evolution changes can effectively drive the code co-evolution. On 562 impacts in two projects' code by 221 metamodel changes, our approach was able to reach an average precision and recall of 89\% and 92.5\%, respectively. }
}
@inproceedings{DBLP:conf/splc/Pereira0TA20,
  author = {Pereira, Juliana Alves and Martin, Hugo and Temple, Paul and Acher, Mathieu},
  title = {Machine learning and configurable systems: a gentle introduction},
  booktitle = {{SPLC} '20: 24th {ACM} International Systems and Software Product Line Conference, Montreal, Quebec, Canada, October 19-23, 2020, Volume {A}},
  pages = {40:1},
  publisher = {{ACM}},
  year = {2020},
  url = {https://doi.org/10.1145/3382025.3414976},
  doi = {10.1145/3382025.3414976},
  hal_id = {hal-02287459},
  timestamp = {Thu, 29 Oct 2020 11:14:57 +0100},
  biburl = {https://dblp.org/rec/conf/splc/Pereira0TA20.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  abstract = { The goal of this tutorial is to give an introduction to how machine learning can be used to support activities related to the engineering of configurable systems and software product lines. To the best of our knowledge, this is the first practical tutorial in this trending field. The tutorial is based on a systematic literature review and includes practical tasks (specialization, performance prediction) on real-world systems (VaryLaTeX, x264). }
}
@article{DBLP:journals/jot/BenniMAP20,
  author = {Benni, Benjamin and Mosser, S{\'{e}}bastien and Acher, Mathieu and Paillart, Mathieu},
  title = {Characterizing Black-box Composition Operators via Generated Tailored Benchmarks},
  journal = {J. Object Technol.},
  volume = {19},
  number = {2},
  pages = {7:1--20},
  year = {2020},
  url = {https://doi.org/10.5381/jot.2020.19.2.a7},
  doi = {10.5381/jot.2020.19.2.a7},
  timestamp = {Mon, 03 Aug 2020 08:33:29 +0200},
  biburl = {https://dblp.org/rec/journals/jot/BenniMAP20.bib},
  youtube_id = {https://www.youtube.com/watch?v=9Z3lmoH6g38},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  abstract = { The integration of a model composition operator into a system is a challenging task: the properties associated with such operators can drastically change how the developers will be able to use it. In this paper, we describe a modelling framework that allows a software developer, who is not an expert in model composition, to describe the interface of the operators she wants to use, and describe the properties she expects from them to fit her needs (e.g., idempotence, commutativity, associativity). This abstract description is used to pilot a property-based testing approach on generated code. We applied the approach to two case studies: feature model composition and Git merging. }
}
@article{DBLP:journals/sigsoft/FontanaPAAWCPD20,
  author = {Fontana, Francesca Arcelli and Perrouin, Gilles and Ampatzoglou, Apostolos and Acher, Mathieu and Walter, Bartosz and Cordy, Maxime and Palomba, Fabio and Devroey, Xavier},
  title = {MALTESQUE 2019 Workshop Summary},
  journal = {{ACM} {SIGSOFT} Softw. Eng. Notes},
  volume = {45},
  number = {1},
  pages = {34--35},
  year = {2020},
  url = {https://doi.org/10.1145/3375572.3375582},
  doi = {10.1145/3375572.3375582},
  timestamp = {Thu, 17 Sep 2020 12:05:37 +0200},
  biburl = {https://dblp.org/rec/journals/sigsoft/FontanaPAAWCPD20.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{templeEMSEAdv,
  title = {Empirical Assessment of Generating Adversarial Configurations for Software Product Lines},
  author = {Temple, Paul and Perrouin, Gilles and Acher, Mathieu and Biggio, Battista and J{\'e}z{\'e}quel, Jean-Marc and Roli, Fabio},
  journal = {{Empirical Software Engineering (ESE)}},
  year = {2020},
  month = nov,
  keywords = {software product line ; software variability ; software testing ; machine learning ; quality assurance},
  abstract = { Software product line (SPL) engineers put a lot of effort to ensure that, through the setting of a large number of possible configuration options, products are acceptable and well-tailored to customers' needs. Unfortunately, options and their mutual interactions create a huge configuration space which is intractable to exhaustively explore. Instead of testing all products, machine learning is increasingly employed to approximate the set of acceptable products out of a small training sample of configurations. Machine learning (ML) techniques can refine a software product line through learned constraints and a priori prevent non-acceptable products from being derived. In this paper, we use adversarial ML techniques to generate adversarial configurations fooling ML classifiers and pinpoint incorrect classifications of products (videos) derived from an industrial video generator. Our attacks yield (up to) a 100\% misclassification rate and a drop in accuracy of 5\%. We discuss the implications these results have on SPL quality assurance. },
  month_numeric = {11}
}
@proceedings{DBLP:conf/sigsoft/2019maltesque,
  editor = {Fontana, Francesca Arcelli and Walter, Bartosz and Ampatzoglou, Apostolos and Palomba, Fabio and Perrouin, Gilles and Acher, Mathieu and Cordy, Maxime and Devroey, Xavier},
  title = {Proceedings of the 3rd ACM SIGSOFT International Workshop on Machine Learning Techniques for Software Quality Evaluation, MaLTeSQuE@ESEC/SIGSOFT FSE 2019, Tallinn, Estonia, August 27, 2019},
  publisher = {{ACM}},
  year = {2019},
  url = {https://doi.org/10.1145/3340482},
  doi = {10.1145/3340482},
  isbn = {978-1-4503-6855-1},
  timestamp = {Tue, 27 Aug 2019 14:32:36 +0200},
  biburl = {https://dblp.org/rec/conf/sigsoft/2019maltesque.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{alvespereira:hal-02356290,
  title = {Sampling Effect on Performance Prediction of Configurable Systems: A Case Study},
  author = {Alves Pereira, Juliana and Acher, Mathieu and Martin, Hugo and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://hal.inria.fr/hal-02356290},
  year = {2020},
  keywords = {Configurable Systems ; Performance Prediction ; Machine Learning ; Software Product Lines},
  pdf = {https://hal.inria.fr/hal-02356290/file/ICPE2020.pdf},
  hal_id = {hal-02356290},
  hal_version = {v1},
  booktitle = {International Conference on Performance Engineering (ICPE 2020)},
  abstract = { Numerous software systems are highly configurable and provide a myriad of configuration options that users can tune to fit their functional and performance requirements (e.g., execution time). Measuring all configurations of a system is the most obvious way to understand the effect of options and their interactions, but is too costly or infeasible in practice. Numerous works thus propose to measure only a few configurations (a sample) to learn and predict the performance of any combination of options' values. A challenging issue is to sample a small and representative set of configurations that leads to a good accuracy of performance prediction models. A recent study devised a new algorithm, called distance-based sampling, that obtains state-of-the-art accurate performance predictions on different subject systems. In this paper, we replicate this study through an in-depth analysis of x264, a popular and configurable video encoder. We systematically measure 1,152 configurations of x264 with 17 input videos and two quantitative properties (encoding time and encoding size). Our goal is to understand whether there is a dominant sampling strategy over the very same subject system (x264), i.e., whatever the workload and targeted performance properties. The findings from this study show that random sampling leads to more accurate performance models. However, without considering random, there is no single "dominant" sampling; instead, different strategies perform best on different inputs and non-functional properties, further challenging practitioners and researchers. },
  important = {1},
  youtube_id = {https://www.youtube.com/watch?v=jtXoYdHhVtI},
  note = {Best Paper Award}
}
@article{DBLP:journals/jss/AcherC19,
  author = {Acher, Mathieu and Cohen, Myra B.},
  title = {Special issue on systems and software product line engineering},
  journal = {Journal of Systems and Software (JSS)},
  volume = {154},
  pages = {110--111},
  year = {2019},
  url = {https://doi.org/10.1016/j.jss.2019.04.054},
  doi = {10.1016/j.jss.2019.04.054},
  timestamp = {Mon, 24 Feb 2020 15:59:48 +0100},
  biburl = {https://dblp.org/rec/journals/jss/AcherC19.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
@techreport{acher:hal-02314830,
  title = {Learning Very Large Configuration Spaces: What Matters for Linux Kernel Sizes},
  author = {Acher, Mathieu and Martin, Hugo and Pereira, Juliana Alves and Blouin, Arnaud and J{\'e}z{\'e}quel, Jean-Marc and Khelladi, Djamel Eddine and Lesoil, Luc and Barais, Olivier},
  url = {https://hal.inria.fr/hal-02314830},
  type = {Research Report},
  institution = {{Inria Rennes - Bretagne Atlantique}},
  year = {2019},
  month = oct,
  pdf = {https://hal.inria.fr/hal-02314830/file/LinuxSizePrediction.pdf},
  hal_id = {hal-02314830},
  hal_version = {v1},
  abstract = { Linux kernels are used in a wide variety of appliances, many of them having strong requirements on the kernel size due to constraints such as limited memory or instant boot. With more than ten thousand configuration options to choose from, obtaining a suitable trade-off between kernel size and functionality is an extremely hard problem. Developers, contributors, and users actually spend significant effort to document, understand, and eventually tune (combinations of) options for meeting a kernel size. In this paper, we investigate how machine learning can help explain what matters for predicting a given Linux kernel size. Unveiling what matters in such a very large configuration space is challenging for two reasons: (1) whatever the time we spend on it, we can only build and measure a tiny fraction of possible kernel configurations; (2) the prediction model should be both accurate and interpretable. We compare different machine learning algorithms and demonstrate the benefits of specific feature encoding and selection methods to learn an accurate model that is fast to compute and simple to interpret. Our results are validated over 95,854 kernel configurations and show that we can achieve low prediction errors over a reduced set of options. We also show that we can extract interpretable information for refining documentation and experts' knowledge of Linux, or even assigning more sensible default values to options. },
  month_numeric = {10}
}
@techreport{acher:hal-02147012,
  title = {Learning From Thousands of Build Failures of Linux Kernel Configurations},
  author = {Acher, Mathieu and Martin, Hugo and Alves Pereira, Juliana and Blouin, Arnaud and Eddine Khelladi, Djamel and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://hal.inria.fr/hal-02147012},
  type = {Technical Report},
  pages = {1-12},
  institution = {{Inria ; IRISA}},
  year = {2019},
  month = jun,
  keywords = {Linux kernel ; configurable systems ; software testing ; software product lines ; build systems ; operating systems},
  pdf = {https://hal.inria.fr/hal-02147012/file/LinuxCompilation_BuildFailures.pdf},
  hal_id = {hal-02147012},
  hal_version = {v2},
  abstract = { The Linux kernel offers more than ten thousand configuration options that can be combined to build an almost infinite number of kernel variants. Developers and contributors spend significant effort and computational resources to continuously track and hopefully fix configurations that lead to build failures. In this experience paper, we report on our endeavor to develop an infrastructure, called TuxML, able to build any kernel configuration and learn what could explain or even prevent configurations' failures. Our results over 95,000+ configurations show that TuxML can accurately cluster 3,600+ failures, automatically trace the responsible configuration options, and learn by itself to avoid unnecessary and costly builds. Our large qualitative and quantitative analysis reveals insights about Linux itself (e.g., we only found 16 configuration bugs) and the difficulty of engineering a build infrastructure for configurable systems (e.g., a false positive failure may mask true configuration bugs). },
  month_numeric = {6}
}
@inproceedings{acher:hal-02342130,
  title = {Learning the Linux Kernel Configuration Space: Results and Challenges},
  author = {Acher, Mathieu},
  url = {https://hal.inria.fr/hal-02342130},
  booktitle = {{ELC Europe 2019 - Embedded Linux Conference Europe 2019}},
  address = {Lyon, France},
  pages = {1-49},
  year = {2019},
  youtube_id = {https://www.youtube.com/watch?v=UBghs-cwQX4},
  month = oct,
  pdf = {https://hal.inria.fr/hal-02342130/file/TuxML-OSS2019-final.pdf},
  hal_id = {hal-02342130},
  hal_version = {v1},
  month_numeric = {10}
}
@inproceedings{martin:hal-02287459,
  title = {Machine Learning and Configurable Systems: A Gentle Introduction},
  author = {Martin, Hugo and Pereira, Juliana Alves and Temple, Paul and Acher, Mathieu},
  url = {https://hal.inria.fr/hal-02287459},
  booktitle = {{SPLC 2019 - 23rd International Systems and Software Product Line Conference}},
  address = {Paris, France},
  publisher = {{ACM}},
  pages = {83-88},
  year = {2019},
  month = sep,
  doi = {10.1145/3336294.3342383},
  pdf = {https://hal.inria.fr/hal-02287459/file/SPLC_Tuto_ML_config_systems%20%284%29.pdf},
  hal_id = {hal-02287459},
  hal_version = {v1},
  month_numeric = {9}
}
@inproceedings{acher:hal-02268373,
  title = {Seventh international workshop on reverse variability engineering (REVE 2019)},
  author = {Acher, Mathieu and Ziadi, Tewfik and Lopez-Herrejon, Roberto E and Martinez, Jabier},
  url = {https://hal.archives-ouvertes.fr/hal-02268373},
  booktitle = {{SPLC 2019 - 23rd International Systems and Software Product Line Conference}},
  address = {Paris, France},
  publisher = {{ACM Press}},
  pages = {1},
  year = {2019},
  month = sep,
  hal_id = {hal-02268373},
  hal_version = {v1},
  month_numeric = {9}
}
@article{temple:hal-02177158,
  title = {Empirical Assessment of Multimorphic Testing},
  author = {Temple, Paul and Acher, Mathieu and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://hal.inria.fr/hal-02177158},
  journal = {{IEEE Transactions on Software Engineering (TSE)}},
  publisher = {{Institute of Electrical and Electronics Engineers}},
  volume = {47},
  number = {7},
  pages = {1511--1527},
  year = {2021},
  month = jul,
  doi = {10.1109/TSE.2019.2926971},
  keywords = {performance ; test evaluation ; software testing ; configuration ; software product lines ; performance testing ; variability},
  pdf = {https://hal.inria.fr/hal-02177158/file/Leveraging_performance_variations_with_Multimorphic_Testing-TempleAcherJezequel-TSE.pdf},
  hal_id = {hal-02177158},
  hal_version = {v1},
  important = {1},
  abstract = { The performance of software systems (such as speed, memory usage, correct identification rate) tends to be an ever more important concern, often nowadays on par with functional correctness for critical systems. Systematically testing these performance concerns is however extremely difficult, in particular because there exists no theory underpinning the evaluation of a performance test suite, i.e., to tell the software developer whether such a test suite is "good enough" or even whether a test suite is better than another one. This paper proposes to apply Multimorphic testing and empirically assess the effectiveness of performance test suites of software systems coming from various domains. By analogy with mutation testing, our core idea is to leverage the typical configurability of these systems, and to check whether it makes any difference in the outcome of the tests: i.e., are some tests able to "kill" underperforming system configurations? More precisely, we propose a framework for defining and evaluating the coverage of a test suite with respect to a quantitative property of interest. Such properties can be the execution time, the memory usage or the success rate in tasks performed by a software system. This framework can be used to assess whether a new test case is worth adding to a test suite or to select an optimal test suite with respect to a property of interest. We evaluate several aspects of our proposal through 3 empirical studies carried out in different fields: object tracking in videos, object recognition in images, and code generators. },
  month_numeric = {7}
}
@inproceedings{temple:hal-02287616,
  title = {Towards Quality Assurance of Software Product Lines with Adversarial Configurations},
  author = {Temple, Paul and Acher, Mathieu and Perrouin, Gilles and Biggio, Battista and J{\'e}z{\'e}quel, Jean-Marc and Roli, Fabio},
  url = {https://hal.inria.fr/hal-02287616},
  booktitle = {{23rd International Systems and Software Product Line Conference}},
  address = {Paris, France},
  year = {2019},
  month = sep,
  keywords = {software product line ; software variability ; software testing ; machine learning ; quality assurance},
  pdf = {https://hal.inria.fr/hal-02287616/file/Adversarial_Constraints_for_Variability_Models_SPLC2019%20%286%29.pdf},
  hal_id = {hal-02287616},
  hal_version = {v1},
  abstract = { Software product line (SPL) engineers put a lot of effort to ensure that, through the setting of a large number of possible configuration options, products are acceptable and well-tailored to customers' needs. Unfortunately, options and their mutual interactions create a huge configuration space which is intractable to exhaustively explore. Instead of testing all products, machine learning is increasingly employed to approximate the set of acceptable products out of a small training sample of configurations. Machine learning (ML) techniques can refine a software product line through learned constraints and a priori prevent non-acceptable products from being derived. In this paper, we use adversarial ML techniques to generate adversarial configurations fooling ML classifiers and pinpoint incorrect classifications of products (videos) derived from an industrial video generator. Our attacks yield (up to) a 100\% misclassification rate and a drop in accuracy of 5\%. We discuss the implications these results have on SPL quality assurance. },
  month_numeric = {9}
}
@inproceedings{heinz:hal-02129131,
  title = {Discovering Indicators for Classifying Wikipedia Articles in a Domain: A Case Study on Software Languages},
  author = {Heinz, Marcel and Lämmel, Ralf and Acher, Mathieu},
  url = {https://hal.inria.fr/hal-02129131},
  booktitle = {{SEKE 2019 - The 31st International Conference on Software Engineering and Knowledge Engineering}},
  address = {Lisbon, Portugal},
  pages = {1-6},
  year = {2019},
  month = jul,
  pdf = {https://hal.inria.fr/hal-02129131/file/Discovering_Indicators_for_Classifying_Wikipedia_Articles_in_a_Domain_subtitle_A_Case_Study_on_Software_Languages%20%2812%29.pdf},
  hal_id = {hal-02129131},
  hal_version = {v1},
  month_numeric = {7}
}
@inproceedings{plazar:hal-01991857,
  title = {Uniform Sampling of SAT Solutions for Configurable Systems: Are We There Yet?},
  author = {Plazar, Quentin and Acher, Mathieu and Perrouin, Gilles and Devroey, Xavier and Cordy, Maxime},
  url = {https://hal.inria.fr/hal-01991857},
  booktitle = {ICST 2019 - 12th International Conference on Software Testing, Verification, and Validation},
  address = {Xian, China},
  pages = {1-12},
  year = {2019},
  month = apr,
  keywords = {Software product lines ; Variability modeling ; SAT ; Configurable systems ; Software testing ; Uniform sampling},
  pdf = {https://hal.inria.fr/hal-01991857/file/Uniform_Sampling_of_SAT_Solutions_for_Configurable_Systems__Are_We_There_Yet____footnotesize_textsuperscript__%20%283%29.pdf},
  hal_id = {hal-01991857},
  hal_version = {v1},
  abstract = { Uniform or near-uniform generation of solutions for large satisfiability formulas is a problem of theoretical and practical interest for the testing community. Recent works proposed two algorithms (namely UniGen and QuickSampler) for reaching a good compromise between execution time and uniformity guarantees, with empirical evidence on SAT benchmarks. In the context of highly-configurable software systems (e.g., Linux), it is unclear whether UniGen and QuickSampler can scale and sample uniform software configurations. In this paper, we perform a thorough experiment on 128 real-world feature models. We find that UniGen is unable to produce SAT solutions out of such feature models. Furthermore, we show that QuickSampler does not generate uniform samples and that some features are either never part of the sample or too frequently present. Finally, using a case study, we characterize the impacts of these results on the ability to find bugs in a configurable system. Overall, our results suggest that we are not there: more research is needed to explore the cost-effectiveness of uniform sampling when testing large configurable systems. },
  month_numeric = {4}
}
@inproceedings{amand:hal-01990767,
  title = {Towards Learning-Aided Configuration in 3D Printing: Feasibility Study and Application to Defect Prediction},
  author = {Amand, Benoit and Cordy, Maxime and Heymans, Patrick and Acher, Mathieu and Temple, Paul and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://hal.inria.fr/hal-01990767},
  booktitle = {{VaMoS 2019 - 13th International Workshop on Variability Modelling of Software-Intensive Systems}},
  address = {Leuven, Belgium},
  pages = {1-9},
  year = {2019},
  workshop = {1},
  month = feb,
  keywords = {Machine Learning ; Configuration ; Sampling ; 3D printing},
  pdf = {https://hal.inria.fr/hal-01990767/file/Towards_Learning_Aided_Configuration_in_3D_Printing%20%2811%29.pdf},
  hal_id = {hal-01990767},
  hal_version = {v1},
  abstract = { Configurators rely on logical constraints over parameters to aid users and determine the validity of a configuration. However, for some domains, capturing such configuration knowledge is hard, if not infeasible. This is the case in the 3D printing industry, where parametric 3D object models contain the list of parameters and their value domains, but no explicit constraints. This calls for a complementary approach that learns what configurations are valid based on previous experiences. In this paper, we report on preliminary experiments showing the capability of state-of-the-art classification algorithms to assist the configuration process. While machine learning holds its promises when it comes to evaluation scores, an in-depth analysis reveals the opportunity to combine the classifiers with constraint solvers. },
  month_numeric = {2}
}
@proceedings{DBLP:conf/kbse/2018mases,
  editor = {Perrouin, Gilles and Acher, Mathieu and Cordy, Maxime and Devroey, Xavier},
  title = {Proceedings of the 1st International Workshop on Machine Learning and Software Engineering in Symbiosis, MASES@ASE 2018, Montpellier, France, September 3, 2018},
  publisher = {{ACM}},
  year = {2018},
  url = {https://doi.org/10.1145/3243127},
  doi = {10.1145/3243127},
  timestamp = {Wed, 21 Nov 2018 12:44:20 +0100},
  biburl = {https://dblp.org/rec/bib/conf/kbse/2018mases},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{acher:hal-01829933,
  title = {Teaching Software Product Lines: A Snapshot of Current Practices and Challenges (Journal-First Abstract)},
  author = {Acher, Mathieu and Lopez-Herrejon, Roberto E and Rabiser, Rick},
  url = {https://hal.inria.fr/hal-01829933},
  booktitle = {{SPLC2018 - 22nd International Systems and Software Product Line Conference}},
  address = {Gothenburg, Sweden},
  pages = {1},
  year = {2018},
  month = sep,
  keywords = {Surveys and overviews ; Software Engineering Teaching ; Software Product Line Teaching ; $\bullet$ Social and professional topics $\rightarrow$ Computing education ; $\bullet$ Applied computing $\rightarrow$ Education ; $\bullet$ Software and its engineering $\rightarrow$ Software product lines ; Software Product Lines ; Variability Modeling},
  hal_id = {hal-01829933},
  hal_version = {v1},
  abstract = { This extended abstract summarizes our article entitled "Teaching Software Product Lines: A Snapshot of Current Practices and Challenges" published in the ACM Transactions on Computing Education, vol. 18, in 2017 (http://doi.acm.org/10.1145/3088440). The article reports on three initiatives we have conducted with scholars, educators, industry practitioners, and students to understand the connection between software product lines and education and to derive recommendations for educators to continue improving the state of practice of teaching SPLs. },
  month_numeric = {9}
}
@article{halin:hal-01829928,
  title = {Test them all, is it worth it? Assessing configuration sampling on the JHipster Web development stack},
  author = {Halin, Axel and Nuttinck, Alexandre and Acher, Mathieu and Devroey, Xavier and Perrouin, Gilles and Baudry, Benoit},
  url = {https://doi.org/10.1007/s10664-018-9635-4},
  note = {Empirical Software Engineering journal},
  journal = {{Empirical Software Engineering (ESE)}},
  publisher = {{Springer Verlag}},
  volume = {24},
  number = {2},
  pages = {674--717},
  year = {2019},
  doi = {10.1007/s10664-018-9635-4},
  month = jul,
  hal_id = {hal-01829928},
  hal_version = {v1},
  important = {1},
  youtube_id = {https://www.youtube.com/watch?v=mjBmarVDBBo},
  abstract = { Many approaches for testing configurable software systems start from the same assumption: it is impossible to test all configurations. This motivated the definition of variability-aware abstractions and sampling techniques to cope with large configuration spaces. Yet, there is no theoretical barrier that prevents the exhaustive testing of all configurations by simply enumerating them, if the effort required to do so remains acceptable. Not only this: we believe there is a lot to be learned by systematically and exhaustively testing a configurable system. In this case study, we report on the first ever endeavour to test all possible configurations of an industry-strength, open source configurable software system, JHipster, a popular code generator for web applications. We built a testing scaffold for the 26,000+ configurations of JHipster using a cluster of 80 machines during 4 nights for a total of 4,376 hours (182 days) of CPU time. We find that 35.70\% of configurations fail and we identify the feature interactions that cause the errors. We show that sampling strategies (like dissimilarity and 2-wise): (1) are more effective at finding faults than the 12 default configurations used in the JHipster continuous integration; (2) can be too costly and exceed the available testing budget. We cross this quantitative analysis with the qualitative assessment of JHipster's lead developers. },
  month_numeric = {7}
}
@inproceedings{acher:hal-01659161,
  title = {VaryLaTeX: Learning Paper Variants That Meet Constraints},
  author = {Acher, Mathieu and Temple, Paul and J{\'e}z{\'e}quel, Jean-Marc and Galindo Duarte, Jos{\'e} Angel and Martinez, Jabier and Ziadi, Tewfik},
  url = {https://hal.inria.fr/hal-01659161},
  booktitle = {12th International Workshop on Variability Modelling of Software-intensive Systems (VaMoS'18)},
  address = {Madrid, Spain},
  year = {2018},
  workshop = {1},
  month = feb,
  hal_id = {hal-01659161},
  hal_version = {v1},
  youtube_id = {https://www.youtube.com/watch?v=u1ralqbHCyM},
  abstract = { How to submit a research paper, a technical report, a grant proposal, or a curriculum vitae that respects imposed constraints such as formatting instructions and page limits? It is a challenging task, especially with time pressure and deadlines. In this paper, we present a solution based on variability, constraint programming, and machine learning techniques for documents written in LaTeX. Users simply have to annotate LaTeX source files with variability information, e.g., for (de)activating portions of text, tuning the figures' sizes, or tweaking line spacing. Then, a fully automated procedure learns constraints among Boolean and numerical features for avoiding non-acceptable paper variants. As a result, users can configure their papers (e.g., for controlling the aesthetics) or pick a (random) paper variant that meets constraints, e.g., page limits. We describe our implementation and report on some experiences with VaryLaTeX. },
  month_numeric = {2}
}
@article{alferez:hal-01688247,
  title = {Modeling Variability in the Video Domain: Language and Experience Report},
  author = {Alf{\'e}rez, Mauricio and Acher, Mathieu and Galindo, Jos{\'e} A and Baudry, Benoit and Benavides, David},
  url = {https://doi.org/10.1007/s11219-017-9400-8},
  journal = {{Software Quality Journal}},
  publisher = {{Springer Verlag}},
  pages = {307--347},
  volume = {27},
  number = {1},
  year = {2019},
  doi = {10.1007/s11219-017-9400-8},
  keywords = {video testing ; feature modeling ; variability modeling ; configuration ; domain-specific languages ; automated reasoning ; software product line engineering},
  pdf = {https://hal.inria.fr/hal-01688247/file/modeling-variability-video%20%2828%29.pdf},
  hal_id = {hal-01688247},
  hal_version = {v1},
  abstract = { [Context] In an industrial project, we addressed the challenge of developing a software-based video generator such that consumers and providers of video processing algorithms can benchmark them on a wide range of video variants. [Objective] This article aims to report on our positive experience in modeling, controlling, and implementing software variability in the video domain. [Method] We describe how we have designed and developed a variability modeling language, called VM, resulting from the close collaboration with industrial partners during two years. We expose the specific requirements and advanced variability constructs we developed and used to characterize and derive variations of video sequences. [Results] The results of our experiments and industrial experience show that our solution is effective to model complex variability information and supports the synthesis of hundreds of realistic video variants. [Conclusions] From the software language perspective, we learned that basic variability mechanisms are useful but not enough; attributes and multi-features are of prior importance; meta-information and specific constructs are relevant for scalable and purposeful reasoning over variability models. From the video domain and software perspective, we report on the practical benefits of a variability approach. With more automation and control, practitioners can now envision benchmarking video algorithms over large, diverse, controlled, yet realistic datasets (videos that mimic real recorded videos) – something impossible at the beginning of the project. }
}
@article{DBLP:journals/corr/abs-1805-12021,
  author = {Temple, Paul and Acher, Mathieu and Biggio, Battista and J{\'e}z{\'e}quel, Jean-Marc and Roli, Fabio},
  title = {Towards Adversarial Configurations for Software Product Lines},
  journal = {CoRR},
  volume = {abs/1805.12021},
  year = {2018},
  url = {http://arxiv.org/abs/1805.12021},
  archiveprefix = {arXiv},
  eprint = {1805.12021},
  timestamp = {Mon, 13 Aug 2018 16:48:59 +0200},
  biburl = {https://dblp.org/rec/bib/journals/corr/abs-1805-12021},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{temple:hal-01730163,
  title = {Multimorphic Testing},
  author = {Temple, Paul and Acher, Mathieu and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://hal.inria.fr/hal-01730163},
  booktitle = {{ACM/IEEE 40th International Conference on Software Engineering: Companion Proceedings}},
  address = {Gothenburg, Sweden},
  pages = {1-2},
  year = {2018},
  month = may,
  doi = {10.1145/3183440.3195043},
  pdf = {https://hal.inria.fr/hal-01730163/file/Multimorphic.pdf},
  hal_id = {hal-01730163},
  hal_version = {v2},
  abstract = { The functional correctness of a software application is, of course, a prime concern, but other issues such as its execution time, precision, or energy consumption might also be important in some contexts. Systematically testing these quantitative properties is still extremely difficult, in particular, because there exists no method to tell the developer whether such a test set is "good enough" or even whether a test set is better than another one. This paper proposes a new method, called Multimorphic testing, to assess the relative effectiveness of a test suite for revealing performance variations of a software system. By analogy with mutation testing, our core idea is to vary software parameters, and to check whether it makes any difference on the outcome of the tests: i.e., are some tests able to "kill" bad morphs (configurations)? Our method can be used to evaluate the quality of a test suite with respect to a quantitative property of interest, such as execution time or computation accuracy. },
  month_numeric = {5}
}
@inproceedings{martinez:hal-01720519,
  title = {Towards Estimating and Predicting User Perception on Software Product Variants},
  author = {Martinez, Jabier and Sottet, Jean-S{\'e}bastien and Garcia Frey, Alfonso and Bissyand{\'e}, Tegawend{\'e} and Ziadi, Tewfik and Klein, Jacques and Temple, Paul and Acher, Mathieu and Le Traon, Yves},
  url = {https://hal.sorbonne-universite.fr/hal-01720519},
  booktitle = {{ICSR 2018 - International Conference on Software Reuse}},
  address = {Madrid, Spain},
  pages = {1-16},
  year = {2018},
  month = may,
  keywords = {product variants ; Software Product Lines ; quality attributes ; quality estimation ; computer-generated art},
  pdf = {https://hal.sorbonne-universite.fr/hal-01720519/file/Martinez_et_al_ICSR2018.pdf},
  hal_id = {hal-01720519},
  hal_version = {v1},
  abstract = { Estimating and predicting user subjective perceptions of software products is a challenging, yet increasingly important, endeavour. As an extreme case study, we consider the problem of exploring computer-generated art object combinations that will please the maximum number of people. Since it is not feasible to gather feedback for all art products because of a combinatorial explosion of possible configurations as well as resource and time limitations, the challenging objective is to rank and identify optimal art product variants that can be generated based on their average likability. We present the use of Software Product Line (SPL) techniques for gathering and leveraging user feedback within the boundaries of a variability model. Our approach is developed in two phases: 1) the creation of a data set using a genetic algorithm and real feedback and 2) the application of a data mining technique on this data set to create a ranking enriched with confidence metrics. We perform a case study of a real-world computer-generated art system. The results of our approach on the arts domain reveal interesting directions for the analysis of user-specific qualities of SPLs. },
  month_numeric = {5}
}
@inproceedings{DBLP:journals/corr/abs-1710-07980,
  author = {Halin, Axel and Nuttinck, Alexandre and Acher, Mathieu and Devroey, Xavier and Perrouin, Gilles and Baudry, Benoit},
  title = {Test them all, is it worth it? A ground truth comparison of configuration sampling strategies},
  journal = {CoRR},
  volume = {abs/1710.07980},
  year = {2017},
  url = {http://arxiv.org/abs/1710.07980},
  archiveprefix = {arXiv},
  eprint = {1710.07980},
  timestamp = {Wed, 01 Nov 2017 19:05:42 +0100},
  biburl = {http://dblp.org/rec/bib/journals/corr/abs-1710-07980},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  abstract = { Many approaches for testing configurable software systems start from the same assumption: it is impossible to test all configurations. This motivated the definition of variability-aware abstractions and sampling techniques to cope with large configuration spaces. Yet, there is no theoretical barrier that prevents the exhaustive testing of all configurations by simply enumerating them, if the effort required to do so remains acceptable. Not only this: we believe there is a lot to be learned by systematically and exhaustively testing a configurable system. In this article, we report on the first ever endeavor to test all possible configurations of an industry-strength, open source configurable software system, JHipster, a popular code generator for web applications. We built a testing scaffold for the 26,000+ configurations of JHipster using a cluster of 80 machines during 4 nights for a total of 4,376 hours (182 days) of CPU time. We find that 35.70\% of configurations fail and we identify the feature interactions that cause the errors. We show that sampling strategies (like dissimilarity and 2-wise) (1) are more effective at finding faults than the 12 default configurations used in the JHipster continuous integration; (2) can be too costly and exceed the available testing budget. We cross this quantitative analysis with the qualitative assessment of JHipster's lead developers. }
}
@proceedings{DBLP:conf/splc/2017a,
  editor = {Cohen, Myra B. and Acher, Mathieu and Fuentes, Lidia and Schall, Daniel and Bosch, Jan and Capilla, Rafael and Bagheri, Ebrahim and Xiong, Yingfei and Troya, Javier and Cortes, Antonio Ruiz and Benavides, David},
  title = {Proceedings of the 21st International Systems and Software Product Line Conference, SPLC 2017, Volume A, Sevilla, Spain, September 25-29, 2017},
  publisher = {{ACM}},
  year = {2017},
  url = {https://doi.org/10.1145/3106195},
  doi = {10.1145/3106195},
  isbn = {978-1-4503-5221-5},
  timestamp = {Tue, 06 Nov 2018 16:57:31 +0100},
  biburl = {https://dblp.org/rec/bib/conf/splc/2017a},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{plazar:hal-01545557,
  title = {Efficient and Complete FD-Solving for Extended Array Constraints},
  author = {Plazar, Quentin and Acher, Mathieu and Bardin, S{\'e}bastien and Gotlieb, Arnaud},
  url = {https://hal.archives-ouvertes.fr/hal-01545557},
  booktitle = {{IJCAI 2017 - 26th International Joint Conference on Artificial Intelligence}},
  address = {Melbourne, Australia},
  year = {2017},
  month = aug,
  keywords = {Constraint programming ; Automated reasoning ; Software verification and validation},
  pdf = {https://hal.archives-ouvertes.fr/hal-01545557/file/camera_ready.pdf},
  hal_id = {hal-01545557},
  hal_version = {v1},
  important = {1},
  abstract = { Array constraints are essential for handling data structures in automated reasoning and software verification. Unfortunately, the use of a typical finite domain (FD) solver based on local consistency-based filtering has strong limitations when constraints on indexes are combined with constraints on array elements and size. This paper proposes an efficient and complete FD-solving technique for extended constraints over (possibly unbounded) arrays. We describe a simple but particularly powerful transformation for building an equisatisfiable formula that can be efficiently solved using standard FD reasoning over arrays, even in the unbounded case. Experiments show that the proposed solver significantly outperforms FD solvers, and successfully competes with the best SMT-solvers. },
  month_numeric = {8}
}
@article{temple:hal-01659137,
  title = {Learning Contextual-Variability Models},
  author = {Temple, Paul and Acher, Mathieu and J{\'e}z{\'e}quel, Jean-Marc and Barais, Olivier},
  url = {https://hal.inria.fr/hal-01659137},
  journal = {IEEE Software},
  publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
  year = {2017},
  month = nov,
  hal_id = {hal-01659137},
  hal_version = {v1},
  important = {1},
  abstract = { Modeling how contextual factors relate to a software system’s configuration space is usually a manual, error-prone task that depends highly on expert knowledge. Machine-learning techniques can automatically predict the acceptable software configurations for a given context. Such an approach executes and observes a sample of software configurations within a sample of contexts. It then learns what factors of each context will likely discard or activate some of the software’s features. This lets developers and product managers automatically extract the rules that specialize highly configurable systems for specific contexts. },
  month_numeric = {11}
}
@article{TOCE2017,
  title = {Teaching Software Product Lines: A Snapshot of Current Practices and Challenges},
  author = {Acher, Mathieu and Lopez-Herrejon, Roberto Erick and Rabiser, Rick},
  journal = {ACM Transactions on Computing Education (TOCE)},
  publisher = {ACM},
  pages = {31},
  year = {2017},
  hal_id = {hal-01522779},
  abstract = { Software Product Line (SPL) engineering has emerged to provide the means to efficiently model, produce, and maintain multiple similar software variants, exploiting their common properties, and managing their variabilities (differences). With over two decades of existence, the community of SPL researchers and practitioners is thriving as can be attested by the extensive research output and the numerous successful industrial projects. Education has a key role to support the next generation of practitioners to build highly complex, variability-intensive systems. Yet, it is unclear how the concepts of variability and SPLs are taught, what are the possible missing gaps and difficulties faced, what are the benefits, or what is the material available. Also, it remains unclear whether scholars teach what is actually needed by industry. In this article we report on three initiatives we have conducted with scholars, educators, industry practitioners, and students to further understand the connection between SPLs and education, i.e., an online survey on teaching SPLs we performed with 35 scholars, another survey on learning SPLs we conducted with 25 students, as well as two workshops held at the International Software Product Line Conference in 2014 and 2015 with both researchers and industry practitioners participating. We build upon the two surveys and the workshops to derive recommendations for educators to continue improving the state of practice of teaching SPLs, aimed at both individual educators as well as the wider community. }
}
@inproceedings{halin:hal-01468084,
  title = {Yo Variability! JHipster: A Playground for Web-Apps Analyses},
  author = {Halin, Axel and Nuttinck, Alexandre and Acher, Mathieu and Devroey, Xavier and Perrouin, Gilles and Heymans, Patrick},
  url = {https://hal.inria.fr/hal-01468084},
  booktitle = {11th International Workshop on Variability Modelling of Software-intensive Systems (VaMoS'17)},
  address = {Eindhoven, Netherlands},
  pages = {44 - 51},
  year = {2017},
  month = feb,
  workshop = {1},
  doi = {10.1145/3023956.3023963},
  keywords = {Software testing and debugging ; Empirical software validation ; Software configuration management and version control systems ; Software product lines ; Software engineering education ; Case Study ; Web-apps ; Variability-related Analyses},
  pdf = {https://hal.inria.fr/hal-01468084/file/vamos-2017%20%2810%29.pdf},
  hal_id = {hal-01468084},
  hal_version = {v1},
  abstract = { Though variability is everywhere, there has always been a shortage of publicly available cases for assessing variability-aware tools and techniques as well as support for teaching variability-related concepts. Historical software product lines contain industrial secrets their owners do not want to disclose to a wide audience. The open source community contributed to large-scale cases such as Eclipse, Linux kernels, or web-based plugin systems (Drupal, WordPress). To assess the accuracy of sampling and prediction approaches (bugs, performance), a case where all products can be enumerated is desirable. As configuration issues do not lie within only one place but are scattered across technologies and assets, a case exposing such diversity is an additional asset. To this end, we present in this paper our efforts in building an explicit product line on top of JHipster, an industrial open-source Web-app configurator that is both manageable in terms of configurations (≈ 163,000) and diverse in terms of technologies used. We present our efforts in building a variability-aware chain on top of JHipster's configurator and lessons learned using it as a teaching case at the University of Rennes. We also sketch the diversity of analyses that can be performed with our infrastructure as well as early issues found using it. Our long-term goal is both to support students and researchers studying variability analysis and JHipster developers in the maintenance and evolution of their tools. },
  month_numeric = {2}
}
@techreport{temple:hal-01467299,
  title = {Learning-Based Performance Specialization of Configurable Systems},
  author = {Temple, Paul and Acher, Mathieu and J{\'e}z{\'e}quel, Jean-Marc and Noel-Baron, L{\'e}o A and Galindo, Jos{\'e} A},
  url = {https://hal.archives-ouvertes.fr/hal-01467299},
  type = {Research Report},
  institution = {{IRISA, Inria Rennes ; University of Rennes 1}},
  year = {2017},
  month = feb,
  keywords = {software product lines ; machine learning ; constraints and variability mining ; software testing ; variability modeling},
  pdf = {https://hal.archives-ouvertes.fr/hal-01467299/file/TSE-ML.pdf},
  hal_id = {hal-01467299},
  hal_version = {v1},
  important = {1},
  abstract = { A large-scale configurable system typically offers thousands of options or parameters to let the engineers customize it for specific needs. Among the resulting many billions of possible configurations, relating option and parameter values to desired performance is then a daunting task relying on deep know-how of the internals of the configurable system. In this paper, we propose a staged configuration process to narrow the space of possible configurations to a good approximation of those satisfying the desired high-level customer requirements. Based on an oracle (e.g. a runtime test) that tells us whether a given configuration meets the requirements (e.g. speed or memory footprint), we leverage machine learning to retrofit the acquired knowledge into a variability model of the system that can be used to automatically specialize the configurable system. We validate our approach on a set of well-known configurable software systems. Our results show that, for many different kinds of objectives and performance qualities, the approach has interesting accuracy, precision and recall after a learning stage based on a relatively small number of random samples. },
  month_numeric = {2}
}
@article{bennasr:hal-01427218,
  title = {Automated Extraction of Product Comparison Matrices From Informal Product Descriptions},
  author = {Ben Nasr, Sana and B{\'e}can, Guillaume and Acher, Mathieu and Ferreira Filho, João Bosco and Sannier, Nicolas and Baudry, Benoit and Davril, Jean-Marc},
  url = {https://hal.inria.fr/hal-01427218},
  journal = {Journal of Systems and Software (JSS)},
  publisher = {Elsevier},
  volume = {124},
  pages = {82 - 103},
  year = {2017},
  doi = {10.1016/j.jss.2016.11.018},
  pdf = {https://hal.inria.fr/hal-01427218/file/spedoc.pdf},
  hal_id = {hal-01427218},
  hal_version = {v1},
  abstract = { Domain analysts, product managers, or customers aim to capture the important features and differences among a set of related products. A case-by-case review of each product description is a laborious and time-consuming task that fails to deliver a condensed view of a family of products. In this article, we investigate the use of automated techniques for synthesizing a product comparison matrix (PCM) from a set of product descriptions written in natural language. We describe a tool-supported process, based on term recognition, information extraction, clustering, and similarities, capable of identifying and organizing features and values in a PCM – despite the informality and absence of structure in the textual descriptions of products. We evaluate our proposal against numerous categories of products mined from BestBuy. Our empirical results show that the synthesized PCMs contain a wealth of quantitative, comparable information that can potentially complement or even refine technical descriptions of products. The user study shows that our automatic approach is capable of extracting a significant portion of correct features and correct values. This approach has been implemented in MatrixMiner, a web environment with interactive support for automatically synthesizing PCMs from informal product descriptions. MatrixMiner also maintains traceability with the original descriptions and the technical specifications for further refinement or maintenance by users. }
}
@article{DBLP:journals/taosd/FilhoAB16,
  author = {Ferreira Filho, João Bosco and Acher, Mathieu and Barais, Olivier},
  title = {Software Unbundling: Challenges and Perspectives},
  journal = {Trans. Modularity and Composition},
  volume = {1},
  pages = {224--237},
  year = {2016},
  url = {https://hal.inria.fr/hal-01427560},
  publisher = {LNCS},
  pdf = {https://hal.inria.fr/hal-01427560/file/UnbundlingExtension.pdf},
  hal_id = {hal-01427560},
  hal_version = {v1},
  abstract = { Unbundling is a phenomenon that consists of dividing an existing software artifact into smaller ones. It can happen for different reasons; one of them is the fact that applications tend to grow in functionalities, and sometimes this can negatively influence the user experience. For example, mobile applications from well-known companies are being divided into simpler and more focused new ones. Despite its current importance, little is known or studied about unbundling or about how it relates to existing software engineering approaches, such as modularization. Consequently, recent cases point out that it has been performed unsystematically and arbitrarily. In this article, our main goal is to present this novel and relevant concept and its underlying challenges in the light of software engineering, also exemplifying it with recent cases. We relate unbundling to standard software modularization, presenting the new motivations behind it, the resulting problems, and drawing perspectives for future support in the area. }
}
@inproceedings{DBLP:conf/splc/Lopez-HerrejonM16,
  author = {Lopez-Herrejon, Roberto Erick and Martinez, Jabier and Ziadi, Tewfik and Acher, Mathieu},
  title = {Fourth international workshop on reverse variability engineering (REVE 2016)},
  booktitle = {Proceedings of the 20th International Systems and Software Product Line Conference, SPLC 2016, Beijing, China, September 16-23, 2016},
  pages = {345},
  year = {2016},
  url = {http://doi.acm.org/10.1145/2934466.2962734},
  doi = {10.1145/2934466.2962734},
  timestamp = {Wed, 28 Sep 2016 08:07:26 +0200},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/splc/Lopez-HerrejonM16},
  bibsource = {dblp computer science bibliography, http://dblp.org}
}
@inproceedings{perrouin:hal-01406507,
  title = {Featured model types: Towards Systematic Reuse in Modelling Language Engineering},
  author = {Perrouin, Gilles and Amrani, Moussa and Acher, Mathieu and Combemale, Benoit and Legay, Axel and Schobbens, Pierre-Yves},
  url = {https://hal.inria.fr/hal-01406507},
  booktitle = {MiSE '16 - 8th International Workshop on Modeling in Software Engineering},
  publisher = {ACM},
  pages = {1 - 7},
  year = {2016},
  month = may,
  workshop = {1},
  doi = {10.1145/2896982.2896987},
  pdf = {https://hal.inria.fr/hal-01406507/file/c171.pdf},
  hal_id = {hal-01406507},
  hal_version = {v1},
  abstract = { By analogy with software product reuse, the ability to reuse (meta)models and model transformations is key to achieve better quality and productivity. To this end, various opportunistic reuse techniques have been developed, such as higher-order transformations, metamodel adaptation, and model types. However, in contrast to software product development that has moved to systematic reuse by adopting (model-driven) software product lines, we are not quite there yet for modelling languages, missing economies of scope and automation opportunities. Our vision is to transpose the product line paradigm at the metamodel level, where reusable assets are formed by metamodel and transformation fragments and "products" are reusable language building blocks (model types). We introduce featured model types to concisely model variability amongst metamodelling elements, enabling configuration, automated analysis, and derivation of tailored model types. },
  month_numeric = {5}
}
@inproceedings{lenoir:hal-01374140,
  title = {A Decision-making Process for Exploring Architectural Variants in Systems Engineering},
  author = {Le Noir, J{\'e}rome and Madel{\'e}nat, S{\'e}bastien and Labreuche, Christophe and Constant, Olivier and Gailliard, Gr{\'e}gory and Acher, Mathieu and Barais, Olivier},
  url = {https://hal.inria.fr/hal-01374140},
  booktitle = {Software Product Lines Conference (SPLC)},
  address = {Beijing, China},
  year = {2016},
  month = sep,
  keywords = {Model-driven engineering ; Systems engineering ; Decision-making ; Multi-criteria decision analysis ; Design Exploration ; Architecture},
  pdf = {https://hal.inria.fr/hal-01374140/file/SPLC2016-ArchiComp.pdf},
  hal_id = {hal-01374140},
  hal_version = {v1},
  abstract = { In systems engineering, practitioners must explore numerous architectural alternatives until choosing the most adequate variant. The decision-making process is most of the time a manual, time-consuming, and error-prone activity. The exploration and justification of architectural solutions is ad-hoc and mainly consists of trial and error on the modeling assets. In this paper, we report on an industrial case study in which we apply variability modeling techniques to automate the assessment and comparison of several candidate architectures (variants). We first describe how we can use a model-based approach such as the Common Variability Language (CVL) to specify the architectural variability. We show that the selection of an architectural variant is a multi-criteria decision problem in which there are numerous interactions (veto, favor, complementary) between criteria. We present a tooled process for exploring architectural variants integrating both CVL and the MYRIAD method for assessing and comparing variants based on an explicit preference model coming from the elicitation of stakeholders' concerns. This solution allows understanding the differences among variants and their satisfaction with respect to criteria. Beyond improving the automation of variant selection, the results of this experiment highlight that the approach improves rationality in the assessment and provides decision arguments when selecting the preferred variants. },
  month_numeric = {9}
}
@inproceedings{perrouin:hal-01427165,
  title = {A Complexity Tale: Web Configurators},
  author = {Perrouin, Gilles and Acher, Mathieu and Davril, Jean-Marc and Legay, Axel and Heymans, Patrick},
  url = {https://hal.inria.fr/hal-01427165},
  booktitle = {VACE 2016 - 1st International Workshop on Variability and Complexity in Software Design (co-located with ICSE'16)},
  address = {Austin, United States},
  pages = {28 - 31},
  year = {2016},
  month = may,
  doi = {10.1145/2897045.2897051},
  pdf = {https://hal.inria.fr/hal-01427165/file/VACE16-WebConfigurators.pdf},
  hal_id = {hal-01427165},
  hal_version = {v1},
  workshop = {1},
  abstract = { Online configurators are basically everywhere. From physical goods (cars, clothes) to services (cloud solutions, insurance, etc.), such configurators have pervaded many areas of everyday life, providing customers with products tailored to their needs. Being sometimes the only interfaces between product suppliers and consumers, much care has been devoted to the HCI aspects of configurators, aiming at offering an enjoyable buying experience. However, at the backend, the management of numerous and complex configuration options results from an ad-hoc process rather than from a systematic variability-aware engineering approach. We present our experience in analysing web configurators and formalising configuration options in terms of feature models or product configuration matrices. We also consider behavioural issues and perspectives on their architectural design. },
  month_numeric = {5}
}
@inproceedings{temple:hal-01323446,
  title = {Using Machine Learning to Infer Constraints for Product Lines},
  author = {Temple, Paul and Galindo Duarte, Jos{\'e} Angel and Acher, Mathieu and J{\'e}z{\'e}quel, Jean-Marc},
  url = {https://hal.inria.fr/hal-01323446},
  booktitle = {Software Product Line Conference (SPLC'16)},
  address = {Beijing, China},
  year = {2016},
  month = sep,
  doi = {10.1145/2934466.2934472},
  keywords = {software product lines ; machine learning ; constraints and variability mining ; software testing ; variability modeling},
  pdf = {https://hal.inria.fr/hal-01323446/file/SPLC_2016_Paper.pdf},
  hal_id = {hal-01323446},
  hal_version = {v1},
  important = {1},
  abstract = { Variability-intensive systems may include several thousand features allowing for an enormous number of possible configurations, including wrong ones (e.g. the derived product does not compile). For years, engineers have been using constraints to a priori restrict the space of possible configurations, i.e., to exclude configurations that would violate these constraints. The challenge is to find the set of constraints that would be both precise (allow all correct configurations) and complete (never allow a wrong configuration with respect to some oracle). In this paper, we propose the use of a machine learning approach to infer such product-line constraints from an oracle that is able to assess whether a given product is correct. We propose to randomly generate products from the product line, keeping for each of them its resolution model. Then we classify these products according to the oracle, and use their resolution models to infer cross-tree constraints over the product-line. We validate our approach on a product-line video generator, using a simple computer vision algorithm as an oracle. We show that an interesting set of cross-tree constraints can be generated, with reasonable precision and recall. },
  month_numeric = {9}
}
@inproceedings{galindo:hal-01334851,
  title = {Exploiting the Enumeration of All Feature Model Configurations: A New Perspective with Distributed Computing},
  author = {Galindo, Jos{\'e} A. and Acher, Mathieu and Tirado, Juan Manuel and Vidal, Cristian and Baudry, Benoit and Benavides, David},
  url = {https://hal.archives-ouvertes.fr/hal-01334851},
  booktitle = {Software Product Line Conference (SPLC'16)},
  address = {Beijing, China},
  year = {2016},
  month = sep,
  pdf = {https://hal.archives-ouvertes.fr/hal-01334851/file/paper.pdf},
  hal_id = {hal-01334851},
  hal_version = {v1},
  abstract = { Feature models are widely used to encode the configurations of a software product line in terms of mandatory, optional and exclusive features as well as propositional constraints over the features. Numerous computationally expensive procedures have been developed to model check, test, configure, debug, or compute relevant information of feature models. In this paper we explore the possible improvement of relying on the enumeration of all configurations when performing automated analysis operations. The key idea is to pre-compile configurations so that reasoning operations (queries and transformations) can then be performed in polytime. We tackle the challenge of how to scale the existing enumeration techniques. We show that the use of distributed computing techniques might offer practical solutions to previously unsolvable problems and opens new perspectives for the automated analysis of software product lines. },
  slides = {http://www.slideshare.net/acher/exploiting-the-enumeration-of-all-feature-model-configurations-a-new-perspective-with-distributed-computing},
  month_numeric = {9}
}
@techreport{acher:hal-01307091,
  title = {Large-scale Analysis of Chess Games with Chess Engines: A Preliminary Report},
  author = {Acher, Mathieu and Esnault, François},
  url = {https://hal.inria.fr/hal-01307091},
  type = {Technical Report},
  number = {RT-0479},
  institution = {{Inria Rennes Bretagne Atlantique}},
  year = {2016},
  month = apr,
  keywords = { chess game ; chess ;  artificial intelligence ;  data analysis ;  grid computing},
  pdf = {https://hal.inria.fr/hal-01307091/file/RT-479%20%281%29.pdf},
  hal_id = {hal-01307091},
  hal_version = {v1},
  abstract = { The strength of chess engines together with the availability of numerous chess games have attracted the attention of chess players, data scientists, and researchers during the last decades. State-of-the-art engines now provide an authoritative judgement that can be used in many applications like cheating detection, intrinsic ratings computation, skill assessment, or the study of human decision-making. A key issue for the research community is to gather a large dataset of chess games together with the judgement of chess engines. Unfortunately the analysis of each move takes a lot of time. In this paper, we report our effort to analyse almost 5 million chess games with a computing grid. During summer 2015, we processed 270 million unique played positions using the Stockfish engine at a rather high depth (20). We populated a database of more than 1 terabyte of chess evaluations, representing an estimated time of 50 years of computation on a single machine. Our effort is a first step towards the replication of research results, the supply of open data and procedures for exploring new directions, and the investigation of software engineering/scalability issues when computing billions of moves. },
  important = {1},
  month_numeric = {4}
}
@inproceedings{kim:hal-01241673,
  title = {A Formal Modeling and Analysis Framework for Software Product Line of Preemptive Real-Time Systems},
  author = {Kim, Jin Hyung and Legay, Axel and Traonouez, Louis-Marie and Acher, Mathieu and Kang, Sungwon},
  booktitle = {Symposium on Applied Computing (SAC'16), software engineering track},
  year = {2016},
  hal_id = {hal-01241673},
  abstract = { Adapting real-time embedded software for various variants of an application and usage contexts is in high demand. However, the question of how to analyze real-time properties for a family of products (rather than for a single one) has not drawn much attention from researchers. In this paper, we present a formal analysis framework to analyze a family of platform products w.r.t. real-time properties. To this end, we first propose an extension of the widely-used feature model, called Property Feature Model (PFM), that distinguishes features and properties explicitly, so that the scope of properties restricted to features can be explicitly defined. Then we present formal behavioral models of components of a real-time scheduling unit, i.e. tasks, resources, and resource schedulers, such that all real-time scheduling units implied by a PFM are automatically composed with the components to be analyzed against the properties given by the PFM. We apply our approach to the verification of the schedulability of a family of scheduling units using the symbolic and statistical model checkers of Uppaal. }
}
@inproceedings{pinchinat:hal-01243021,
  title = {ATSyRa: An Integrated Environment for Synthesizing Attack Trees },
  author = {Pinchinat, Sophie and Acher, Mathieu and Vojtisek, Didier},
  booktitle = {Second International Workshop on Graphical Models for Security (GraMSec'15) co-located with CSF'15},
  address = {Verona, Italy},
  year = {2015},
  month = jul,
  workshop = {1},
  hal_id = {hal-01243021},
  hal_version = {v1},
  abstract = { Attack trees are widely considered in the field of security for the analysis of risks (or threats) against electronics, computer control, or physical systems. A major barrier is that attack trees can become very complex and thus hard to specify. This paper presents ATSyRA, a tooling environment to automatically synthesize attack trees of a system under study. ATSyRA provides advanced editors to specify high-level descriptions of a system, high-level actions to structure the tree, and ways to interactively refine the synthesis. We illustrate how users can specify a military building, abstract and organize attacks, and eventually obtain a readable attack tree. },
  month_numeric = {7}
}
@inproceedings{davril:hal-01243006,
  title = {Using fuzzy modeling for consistent definitions of product qualities in requirements},
  author = {Davril, Jean-Marc and Cordy, Maxime and Heymans, Patrick and Acher, Mathieu},
  address = {Ottawa, Canada},
  booktitle = {IEEE Second International Workshop on Artificial Intelligence for Requirements Engineering (AIRE'15)},
  year = {2015},
  month = aug,
  doi = {10.1109/AIRE.2015.7337624},
  hal_id = {hal-01243006},
  workshop = {1},
  hal_version = {v1},
  abstract = { Companies increasingly rely on product differentiation and personalization strategies to provide their customers with an expansive catalog, and tools to assist them in finding the product meeting their needs. These tools include product search facilities, recommender systems, and product configurators. They typically represent a product as a set of features, which refer to a large number of technical specifications (e.g. size, weight, battery life). However, customers usually communicate and reason about products in terms of their qualities (e.g. ease-of-use, portability, ergonomics). In this paper, we tackle the problem of formalizing product qualities in the requirements of product-centred applications. Our goal is to extract product qualities from their technical features, so that customers can better perceive and evaluate the proposed products. To this end, we design a procedure for identifying segments of textual product documentation related to specific product qualities, and propose an approach based on fuzzy modeling to represent product qualities on top of technical specifications. Preliminary experiments we carried out on a catalog of cameras tend to show that fuzzy modeling is an appropriate formalism for representing product qualities. We also illustrate how modeled qualities can support the design of product configurators that are centered on the customers' needs. },
  month_numeric = {8}
}
@inproceedings{NasrMM2015,
  author = {Nasr, Sana Ben and B{\'e}can, Guillaume and Acher, Mathieu and Filho, Joao Bosco Ferreira and Baudry, Benoit and Sannier, Nicolas and Davril, Jean-Marc},
  title = {MatrixMiner: a red pill to architect informal product descriptions in the matrix},
  booktitle = {Proceedings of the 2015 10th Joint Meeting on Foundations of Software Engineering, (ESEC/FSE'15)},
  pages = {982--985},
  year = {2015},
  hal_id = {hal-01234338},
  abstract = { Domain analysts, product managers, or customers aim to capture the important features and differences among a set of related products. A case-by-case reviewing of each product description is a laborious and time-consuming task that fails to deliver a condensed view of a product line. This paper introduces MatrixMiner: a tool for automatically synthesizing product comparison matrices (PCMs) from a set of product descriptions written in natural language. MatrixMiner is capable of identifying and organizing features and values in a PCM – despite the informality and absence of structure in the textual descriptions of products. Our empirical results on products mined from BestBuy show that the synthesized PCMs exhibit a large amount of quantitative, comparable information. Users can exploit MatrixMiner to visualize the matrix through a Web editor and review, refine, or complement the cell values thanks to the traceability with the original product descriptions and technical specifications. }
}
@inproceedings{acherjeopardize15,
  author = {Acher, Mathieu and B{\'e}can, Guillaume and Combemale, Benoit and Baudry, Benoit and J{\'e}z{\'e}quel, Jean-Marc},
  title = {Product lines can jeopardize their trade secrets},
  booktitle = {Proceedings of the 2015 10th Joint Meeting on Foundations of Software Engineering (ESEC/FSE'15)},
  pages = {930--933},
  year = {2015},
  hal_id = {hal-01234342},
  abstract = { What do you give for free to your competitor when you exhibit a product line? This paper addresses this question through several cases in which the discovery of trade secrets of a product line is possible and can lead to severe consequences. That is, we show that an outsider can understand the variability realization and gain either confidential business information or even a direct economic advantage. For instance, an attacker can identify hidden constraints and bypass the product line to get access to features or copyrighted data. This paper warns against possible naive modeling, implementation, and testing of variability leading to the existence of product lines that jeopardize their trade secrets. Our vision is that defensive methods and techniques should be developed to specifically protect variability – or at least further complicate the task of reverse engineering it. },
  slides = {http://fr.slideshare.net/gbecan/product-lines-can-jeopardize-their-trade-secrets}
}
@inproceedings{davril2015,
  title = {Towards Breaking The Curse of Dimensionality in Reverse Engineering Feature Models},
  author = {Davril, Jean-Marc and Acher, Mathieu and B{\'e}can, Guillaume and Heymans, Patrick},
  booktitle = {17th International Configuration Workshop (ConfWS'15)},
  address = {Vienna, Austria},
  hal_id = {hal-01243571},
  year = {2015},
  month = sep,
  abstract = { Feature models have become one of the most widely used formalisms for representing the variability among the products of a product line. The design of a feature model from a set of existing products can help stakeholders communicate on the commonalities and differences between the products, facilitate the adoption of mass customization strategies, or provide foundations for engineering product configurators. As the manual construction of feature models proves to be a time-consuming and error-prone task, researchers have proposed various approaches for automatically deriving feature models from available product data. Existing techniques mostly rely on data mining algorithms that search for frequently occurring patterns between the features included in product configurations. However, when the number of features is too large, the sparsity among the configurations can reduce the quality of the extracted model. In this paper, we discuss motivations for the development of dimensionality reduction techniques for product lines in order to support the extraction of feature models in the case of high-dimensional product spaces. We use a real world dataset to illustrate the problems arising with high dimensionality and present four research questions to address these problems. },
  month_numeric = {9}
}
@inproceedings{becan2015AFM,
  title = {Synthesis of Attributed Feature Models From Product Descriptions},
  author = {B{\'e}can, Guillaume and Behjati, Razieh and Gotlieb, Arnaud and Acher, Mathieu},
  booktitle = {19th International Software Product Line Conference (SPLC'15)},
  note = {(research track, long paper)},
  address = {Nashville, TN, USA},
  important = {1},
  year = {2015},
  month = jul,
  hal_id = {hal-01178454},
  abstract = { Many real-world product lines are only represented as non-hierarchical collections of distinct products, described by their configuration values. As the manual preparation of feature models is a tedious and labour-intensive activity, some techniques have been proposed to automatically generate boolean feature models from product descriptions. However, none of these techniques is capable of synthesizing feature attributes and relations among attributes, despite the huge relevance of attributes for documenting software product lines. In this paper, we introduce for the first time an algorithmic and parametrizable approach for computing a legal and appropriate hierarchy of features, including feature groups, typed feature attributes, domain values and relations among these attributes. We have performed an empirical evaluation by using both randomized configuration matrices and real-world examples. The initial results of our evaluation show that our approach can scale up to matrices containing 2,000 attributed features, and 200,000 distinct configurations in a couple of minutes. },
  slides = {http://fr.slideshare.net/acher/synthesis-of-attributed-feature-models-from-product-descriptions},
  month_numeric = {7}
}
@inproceedings{bosco2015,
  title = {Assessing Product Line Derivation Operators Applied to Java Source Code: An Empirical Study},
  author = {Ferreira Filho, João Bosco and Allier, Simon and Acher, Mathieu and Barais, Olivier and Baudry, Benoit},
  booktitle = {19th International Software Product Line Conference (SPLC'15)},
  note = {(research track, long paper)},
  address = {Nashville, TN, USA},
  year = {2015},
  important = {1},
  month = jul,
  hal_id = {hal-01163423},
  abstract = { Product Derivation is a key activity in Software Product Line Engineering. During this process, derivation operators modify or create core assets (e.g., model elements, source code instructions, components) by adding, removing or substituting them according to a given configuration. The result is a derived product that generally needs to conform to a programming or modeling language. Some operators lead to invalid products when applied to certain assets, while others do not; knowing this in advance can help to better use them. However, this is challenging, especially if we consider assets expressed in extensive and complex languages such as Java. In this paper, we empirically answer the following question: which product line operators, applied to which program elements, can synthesize variants of programs that are incorrect, correct or perhaps even conforming to test suites? We implement source code transformations, based on the derivation operators of the Common Variability Language. We automatically synthesize more than 370,000 program variants from a set of 8 real large Java projects (up to 85,000 lines of code), obtaining an extensive panorama of the sanity of the operations. },
  slides = {http://fr.slideshare.net/acher/assessing-product-line-derivation-operators-applied-to-java-source-code-an-empirical-study},
  month_numeric = {7}
}
@inproceedings{DBLP:conf/splc/Lopez-HerrejonZ15,
  author = {Lopez-Herrejon, Roberto E. and Ziadi, Tewfik and Martinez, Jabier and Thurimella, Anil Kumar and Acher, Mathieu},
  title = {Third International Workshop on Reverse Variability Engineering (REVE 2015)},
  booktitle = {Proceedings of the 19th International Conference on Software Product Line, {SPLC} 2015, Nashville, TN, USA, July 20-24, 2015},
  pages = {394},
  year = {2015},
  url = {http://doi.acm.org/10.1145/2791060.2791062},
  doi = {10.1145/2791060.2791062},
}
@inproceedings{degueule2015,
  title = {Tooling Support for Variability and Architectural Patterns in Systems Engineering},
  author = {Degueule, Thomas and Filho, João Bosco Ferreira and Barais, Olivier and Acher, Mathieu and Lenoir, Jérôme and Constant, Olivier and Madelenat, Sebastien and Gailliard, Gregory and Burlot, Godefroy},
  booktitle = {19th International Software Product Line Conference (SPLC'15)},
  note = {(demonstration and tool track)},
  address = {Nashville, TN, USA},
  year = {2015},
  month = jul,
  hal_id = {hal-01242180},
  abstract = { In systems engineering, the deployment of software components is error-prone since numerous safety and security rules have to be preserved. Another related problem is that numerous variants of deployments on different platforms are possible. In this paper we present a technological solution to assist industrial practitioners in producing a safe and secure solution out of numerous architectural variants. First, we introduce a pattern technology that provides correct-by-construction deployment models through the reuse of modeling artefacts organized in a catalog. Second, we develop a variability solution, connected to the pattern technology and based on an extension of the common variability language, for supporting the synthesis of model-based architectural variants. This paper describes a live demonstration of an industrial effort seeking to bridge the gap between variability modeling and system engineering practices. We illustrate the tooling support with an industrial case study (a secure radio platform). },
  month_numeric = {7}
}
@misc{Lopez-HerrejonZ15,
  author = {Acher, Mathieu},
  title = {Talk at Third International Workshop on Reverse Variability Engineering (REVE'15)},
  booktitle = {Proceedings of the 19th International Conference on Software Product Line (SPLC'15)},
  pages = {394},
  year = {2015},
  sorte = {autre},
  slides = {http://fr.slideshare.net/acher/from-basic-variability-models-to-opencompareorg},
  abstract = { Variability is omnipresent in numerous kinds of artefacts (e.g., source code, product matrices) and in different shapes (e.g., conditional compilation, differences among product descriptions). For understanding, reasoning about, maintaining or evolving variability, practitioners usually need an explicit encoding of variability (i.e., a variability model). As a result, numerous techniques have been developed to reverse engineer variability (e.g., through the mining of features and constraints from source code) or for migrating a set of products into a variability system. In this talk we will first present tool-supported techniques for synthesizing variability models from constraints or product descriptions. Practitioners can build Boolean feature models with an interactive environment for selecting a meaningful and sound hierarchy. Attributes can also be synthesized for encoding numerical values and constraints among them. We will present key results we obtained through experiments with the SPLOT repository and product comparison matrices coming from Wikipedia and BestBuy. Finally, we will introduce OpenCompare.org, a recent initiative for editing, reasoning, and mining product comparison matrices. This talk was given at the REVE'15 workshop co-located with SPLC'15 (software product line conference): http://www.isse.jku.at/reve2015/program.html }
}
@inproceedings{ferreirafilho:hal-01116694,
  title = {Challenges on Software Unbundling: Growing and Letting Go},
  author = {Ferreira Filho, João Bosco and Acher, Mathieu and Barais, Olivier},
  url = {https://hal.inria.fr/hal-01116694},
  booktitle = {14th International Conference on Modularity'15},
  address = {Fort Collins, CO, United States},
  year = {2015},
  month = mar,
  keywords = {Unbundling ; refactoring ; evolution ; features ; aspects ; reengineering ; modularization},
  hal_id = {hal-01116694},
  hal_version = {v1},
  abstract = { Unbundling is a phenomenon that consists of dividing an existing software artifact into smaller ones. For example, mobile applications from well-known companies are being divided into simpler and more focused new ones. Despite its current importance, little is known or studied about unbundling or about how it relates to existing software engineering approaches, such as modularization. Consequently, recent cases point out that it has been performed unsystematically and arbitrarily. In this paper, our main goal is to present this novel and relevant concept and its challenges in the light of software engineering, exemplifying it with recent cases. We relate unbundling to standard software modularization, presenting the new motivations behind it, the resulting problems, and drawing perspectives for future support in the area. },
  month_numeric = {3}
}
@inproceedings{acherSPLTea15,
  title = {SPLTea 2015: Second International Workshop on Software Product Line Teaching},
  author = {Acher, Mathieu and Lopez-Herrejon, Roberto Erick and Rabiser, Rick},
  abstract = {Education has a key role to play for disseminating the constantly growing body of Software Product Line (SPL) knowledge. Teaching SPLs is challenging; it is unclear, for example, how SPLs can be taught and what material is available. This workshop aims to explore and explain the current status and ongoing work on teaching SPLs at universities, colleges, and in industry (e.g., by consultants). This second edition will continue the effort made at SPLTea'14. In particular we seek to design and populate an open repository of resources dedicated to SPL teaching. See also http://teaching.variability.io },
  keywords = {teaching; software product lines; configurator},
  language = {English},
  booktitle = {19th International Software Product Line Conference (SPLC'15)},
  address = {Nashville, USA},
  audience = {international},
  year = {2015},
  hal_id = {hal-01243213},
  month = jul,
  month_numeric = {7}
}
@techreport{becan:hal-01116663,
  title = {Synthesis of Attributed Feature Models From Product Descriptions: Foundations},
  author = {B{\'e}can, Guillaume and Behjati, Razieh and Gotlieb, Arnaud and Acher, Mathieu},
  url = {https://hal.inria.fr/hal-01116663},
  type = {Research Report},
  number = {RR-8680},
  institution = {{Inria Rennes}},
  year = {2015},
  month = feb,
  keywords = { reverse engineering ; feature model ;  synthesis ;  automated reasoning},
  hal_id = {hal-01116663},
  hal_version = {v1},
  abstract = { Feature modeling is a widely used formalism to characterize a set of products (also called configurations). As a manual elaboration is a long and arduous task, numerous techniques have been proposed to reverse engineer feature models from various kinds of artefacts. But none of them synthesize feature attributes (or constraints over attributes) despite the practical relevance of attributes for documenting the different values across a range of products. In this report, we develop an algorithm for synthesizing attributed feature models given a set of product descriptions. We present sound, complete, and parametrizable techniques for computing all possible hierarchies, feature groups, placements of feature attributes, domain values, and constraints. We perform a complexity analysis w.r.t. number of features, attributes, configurations, and domain size. We also evaluate the scalability of our synthesis procedure using randomized configuration matrices. This report is a first step that aims to describe the foundations for synthesizing attributed feature models. },
  month_numeric = {2}
}
@inproceedings{becan:hal-01104797,
  title = {On the Variability Secrets of an Online Video Generator},
  author = {B{\'e}can, Guillaume and Acher, Mathieu and J{\'e}z{\'e}quel, Jean-Marc and Menguy, Thomas},
  url = {https://hal.inria.fr/hal-01104797},
  booktitle = {Variability Modelling of Software-intensive Systems (VaMoS'15)},
  address = {Hildesheim, Germany},
  pages = {96 - 102},
  year = {2015},
  month = jan,
  workshop = {1},
  doi = {10.1145/2701319.2701328},
  keywords = {Video generator ; Configurator ; Software product line ; Security ; Reverse engineering},
  hal_id = {hal-01104797},
  hal_version = {v1},
  abstract = { We relate an original experience concerning a popular online video service that offers to generate variants of a humorous video. To further the understanding of the generator, we have reverse engineered its general behavior, architecture, as well as its variation points and its configuration space. The reverse engineering also allows us to create a new generator and online configurator that proposes 18 variation points – instead of only 3 as in the original generator. We explain why and how we have collaborated and are collaborating with the original creators of the video generator. We also highlight how our reverse engineering work represents a threat to the original service and call for further investigating variability-aware security mechanisms. },
  month_numeric = {1}
}
@article{becan:hal-01096969,
  title = {Breathing Ontological Knowledge Into Feature Model Synthesis: An Empirical Study},
  author = {B{\'e}can, Guillaume and Acher, Mathieu and Baudry, Benoit and Ben Nasr, Sana},
  url = {https://hal.inria.fr/hal-01096969},
  journal = {Empirical Software Engineering (ESE)},
  publisher = {Springer},
  pages = {1794--1841},
  volume = {21},
  number = {4},
  year = {2016},
  doi = {10.1007/s10664-014-9357-1},
  keywords = {Software Product Lines ; Feature Model ; Variability ; Model Management ; Reverse Engineering ; Refactoring},
  hal_id = {hal-01096969},
  hal_version = {v1},
  important = {1},
  abstract = { Feature Models (FMs) are a popular formalism for modeling and reasoning about the configurations of a software product line. As the manual construction of an FM is time-consuming and error-prone, management operations have been developed for reverse engineering, merging, slicing, or refactoring FMs from a set of configurations/dependencies. Yet the synthesis of meaningless ontological relations in the FM – as defined by its feature hierarchy and feature groups – may arise and cause severe difficulties when reading, maintaining or exploiting it. Numerous synthesis techniques and tools have been proposed, but only a few consider both configuration and ontological semantics of an FM. There are also few empirical studies investigating ontological aspects when synthesizing FMs. In this article, we define a generic, ontologic-aware synthesis procedure that computes the likely siblings or parent candidates for a given feature. We develop six heuristics for clustering and weighting the logical, syntactical and semantical relationships between feature names. We then perform an empirical evaluation on hundreds of FMs, coming from the SPLOT repository and Wikipedia. We provide evidence that a fully automated synthesis (i.e., without any user intervention) is likely to produce FMs far from the ground truths. As the role of the user is crucial, we empirically analyze the strengths and weaknesses of heuristics for computing ranking lists and different kinds of clusters. We show that a hybrid approach mixing logical and ontological techniques outperforms state-of-the-art solutions. We believe our approach, environment, and empirical results support researchers and practitioners working on reverse engineering and management of FMs. }
}
@inproceedings{acher:hal-01061576,
  hal_id = {hal-01061576},
  url = {http://hal.inria.fr/hal-01061576},
  title = {Metamorphic Domain-Specific Languages: A Journey Into the Shapes of a Language},
  author = {Acher, Mathieu and Combemale, Benoit and Collet, Philippe},
  abstract = {External or internal domain-specific languages (DSLs) or (fluent) APIs? Whoever you are -- a developer or a user of a DSL -- you usually have to choose a side; you should not! What about metamorphic DSLs that change their shape according to your needs? Our 4-year journey of providing the "right" support (in the domain of feature modeling) led us to develop an external DSL, different shapes of an internal API, and maintain all these languages. A key insight is that there is no one-size-fits-all solution or no clear superiority of a solution compared to another. On the contrary, we found that it does make sense to continue the maintenance of an external and internal DSL. Based on our experience and on an analysis of the DSL engineering field, the vision that we foresee for the future of software languages is their ability to be self-adaptable to the most appropriate shape (including the corresponding integrated development environment) according to a particular usage or task. We call such a language, able to change from one shape to another, a metamorphic DSL.},
  keywords = {domain-specific languages; programming; feature model; metamorphic; SQL; database;},
  language = {English},
  affiliation = {DIVERSE - INRIA - IRISA , Laboratoire d'Informatique, Signaux, et Syst{\`e}mes de Sophia-Antipolis (I3S) / Equipe MODALIS},
  booktitle = {Onward! Essays (co-located with SPLASH and OOPSLA)},
  address = {Portland, United States},
  audience = {international},
  doi = {10.1145/2661136.2661159 },
  year = {2014},
  month = sep,
  slides = {http://fr.slideshare.net/acher/metamorphic-domainspecific-languages},
  pdf = {http://hal.inria.fr/hal-01061576/PDF/onwd1446-acher.pdf},
  month_numeric = {9}
}
@inproceedings{pinchinat:hal-01064645,
  hal_id = {hal-01064645},
  url = {http://hal.inria.fr/hal-01064645},
  title = {Towards Synthesis of Attack Trees for Supporting Computer-Aided Risk Analysis},
  author = {Pinchinat, Sophie and Acher, Mathieu and Vojtisek, Didier},
  abstract = {Attack trees are widely used in the field of defense for the analysis of risks (or threats) against electronic systems, computer control systems or physical systems. Based on the analysis of attack trees, practitioners can define actions to take in order to reduce or annihilate risks. A major barrier to supporting computer-aided risk analysis is that attack trees can become very complex and thus hard to specify. This paper is a first step towards a methodology, formal foundations as well as automated techniques to synthesize attack trees from a high-level description of a system. Attacks are expressed as a succession of elementary actions and high-level actions can be used to abstract and organize attacks into exploitable attack trees. We describe our tooling support and identify open challenges for supporting the analysis of risks.},
  language = {English},
  affiliation = {LOGICA - IRISA , DIVERSE - INRIA - IRISA},
  booktitle = {Workshop on Formal Methods in the Development of Software (co-located with SEFM)},
  address = {Grenoble, France},
  audience = {international},
  year = {2014},
  month = sep,
  workshop = {1},
  pdf = {http://hal.inria.fr/hal-01064645/PDF/SEFM-FMDS.pdf},
  month_numeric = {9}
}
@techreport{alferez:hal-01023159,
  hal_id = {hal-01023159},
  url = {http://hal.inria.fr/hal-01023159},
  title = {Modeling Variability in the Video Domain: Language and Experience Report},
  author = {Alf{\'e}rez, Mauricio and Galindo, Jos{\'e} A. and Acher, Mathieu and Baudry, Benoit},
  abstract = {This paper reports on a new domain-specific variability modeling language, called VM, resulting from the close collaboration with industrial partners in the video domain. We expose the requirements and advanced variability constructs required to characterize and realize variations of physical properties of a video (such as objects' speed or scene illumination). The results of our experiments and industrial experience show that VM is effective to model complex variability information and can be exploited to synthesize video variants. We concluded that basic variability mechanisms are useful but not enough, attributes and multi-features are of prior importance, and meta-information is relevant for efficient variability analysis. In addition, we questioned the existence of a one-size-fits-all variability modeling solution applicable in any industry. Yet, some common needs for modeling variability are becoming apparent, such as support for attributes and multi-features.},
  keywords = {VM, variability modeling, product line engineering, highly configurable systems, textual specification languages},
  language = {English},
  affiliation = {DIVERSE - INRIA - IRISA},
  type = {Research Report},
  institution = {INRIA},
  number = {RR-8576},
  year = {2014},
  month = jul,
  other = {1},
  pdf = {http://hal.inria.fr/hal-01023159/PDF/RR-8576.pdf},
  month_numeric = {7}
}
@inproceedings{becan:hal-01058440,
  hal_id = {hal-01058440},
  url = {http://hal.inria.fr/hal-01058440},
  title = {Automating the Formalization of Product Comparison Matrices},
  author = {B{\'e}can, Guillaume and Sannier, Nicolas and Acher, Mathieu and Barais, Olivier and Blouin, Arnaud and Baudry, Benoit},
  abstract = {Product Comparison Matrices (PCMs) form a rich source of data for comparing a set of related and competing products over numerous features. Despite their apparent simplicity, PCMs contain heterogeneous, ambiguous, uncontrolled and partial information that hinders their efficient exploitation. In this paper, we formalize PCMs through model-based automated techniques and develop additional tooling to support the editing and re-engineering of PCMs. 20 participants used our editor to evaluate the PCM metamodel and automated transformations. The results over 75 PCMs from Wikipedia show that (1) a significant proportion of the formalization of PCMs can be automated - 93.11\% of the 30061 cells are correctly formalized; (2) the rest of the formalization can be realized by using the editor and mapping cells to existing concepts of the metamodel. The automated approach opens avenues for engaging a community in the mining, re-engineering, editing, and exploitation of PCMs that now abound on the Internet.},
  keywords = {Metamodeling; Product comparison matrices; Domain analysis; Automated transformation},
  language = {English},
  affiliation = {DIVERSE - INRIA - IRISA},
  booktitle = {29th IEEE/ACM International Conference on Automated Software Engineering (ASE'14)},
  address = {V{\"a}ster{\aa}s, Su{\`e}de},
  audience = {internationale },
  doi = {10.1145/2642937.2643000 },
  year = {2014},
  month = sep,
  slides = {http://fr.slideshare.net/gbecan/ase2014-presentation},
  important = {1},
  pdf = {http://hal.inria.fr/hal-01058440/PDF/FromData2Models.pdf},
  month_numeric = {9}
}
@article{ferreirafilho:hal-01026581,
  hal_id = {hal-01026581},
  url = {http://hal.inria.fr/hal-01026581},
  title = {Generating Counterexamples of Model-based Software Product Lines},
  author = {Ferreira Filho, Joao Bosco and Barais, Olivier and Acher, Mathieu and Le Noir, J{\'e}r{\^o}me and Legay, Axel and Baudry, Benoit},
  abstract = {In a Model-based Software Product Line (MSPL), the variability of the domain is characterized in a variability model and the core artifacts are base models conforming to a modeling language (also called metamodel). A realization model connects the features of the variability model to the base model elements, triggering operations over these elements based on a configuration. The design space of an MSPL is extremely complex to manage for the engineer, since the number of variants may be exponential and the derived product models have to conform to numerous well-formedness and business rules. In this paper, the objective is to provide a way to generate MSPLs, called counterexamples (also called anti-patterns), that can produce invalid product models despite a valid configuration in the variability model. We describe the foundations and motivate the usefulness of counterexamples (e.g., inference of guidelines or domain-specific rules to avoid the specification of incorrect mappings earlier; testing oracles for increasing the robustness of derivation engines given a modeling language). We provide a generic process, based on the Common Variability Language (CVL), to randomly search the space of MSPLs for a specific modelling language. We develop LineGen, a tool on top of CVL and modeling technologies, to support the methodology and the process. LineGen targets different scenarios and is flexible to work either with just a domain metamodel as input or also with pre-defined variability models and base models. We validate the effectiveness of this process for three formalisms at different scales (up to 247 metaclasses and 684 rules). We also apply the approach in the context of a real industrial scenario involving a large-scale metamodel.},
  language = {English},
  affiliation = {DIVERSE - INRIA - IRISA , Thales Research and Technology [Palaiseau] , ESTASYS - INRIA},
  publisher = {Springer},
  journal = {International Journal on Software Tools for Technology Transfer (STTT)},
  audience = {international},
  year = {2014},
  month = jul,
  important = {1},
  pdf = {http://hal.inria.fr/hal-01026581/PDF/STTT2014.pdf},
  month_numeric = {7}
}
@inproceedings{acher:hal-01018937,
  hal_id = {hal-01018937},
  url = {http://hal.inria.fr/hal-01018937},
  title = {Customization and 3D Printing: A Challenging Playground for Software Product Lines},
  author = {Acher, Mathieu and Baudry, Benoit and Barais, Olivier and J{\'e}z{\'e}quel, Jean-Marc},
  abstract = {3D printing is gaining more and more momentum to build customized products in a wide variety of fields. We conduct an exploratory study of Thingiverse, the most popular Website for sharing user-created 3D design files, in order to establish a possible connection with software product line (SPL) engineering. We report on the socio-technical aspects and current practices for modeling variability, implementing variability, configuring and deriving products, and reusing artefacts. We provide hints that SPL-alike techniques are practically used in 3D printing and thus relevant. Finally, we discuss why customization in the 3D printing field represents a challenging playground for SPL engineering.},
  language = {English},
  affiliation = {DIVERSE - INRIA - IRISA},
  booktitle = {18th International Software Product Line Conference (SPLC'14), research track},
  address = {Florence, Italy},
  audience = {international},
  year = {2014},
  month = jul,
  slides = {http://fr.slideshare.net/acher/3d-printing-customization-and-product-lines},
  pdf = {http://hal.inria.fr/hal-01018937/PDF/SPLC2014-3DPrinting.pdf},
  month_numeric = {7}
}
@inproceedings{acher:hal-00980126,
  hal_id = {hal-00980126},
  url = {http://hal.inria.fr/hal-00980126},
  title = {Software Diversity: Challenges to handle the imposed, Opportunities to harness the chosen},
  author = {Acher, Mathieu and Barais, Olivier and Baudry, Benoit and Blouin, Arnaud and Bourcier, Johann and Combemale, Benoit and J{\'e}z{\'e}quel, Jean-Marc and Plouzeau, No{\"e}l},
  abstract = {Diversity emerges as a critical concern that spans all activities in software engineering (from design to verification, from deployment to runtime resilience) and appears in all sorts of domains, which rely on software intensive systems, from systems of systems to pervasive combinations of Internet of Things and Internet of Services. Although these domains are apparently radically different, we envision a strong convergence of the scientific principles underpinning their construction and validation towards flexible and open yet dependable systems. In this paper, we discuss the software engineering challenges raised by these requirements for flexibility and openness, focusing on four dimensions of diversity: the diversity of functionalities required by the different customers; the diversity of languages used by the stakeholders involved in the construction of these systems; the diversity of runtime environments in which software has to run and adapt; the diversity of failures against which the system must be able to react. In particular, we want to emphasize the challenges for handling imposed diversity, as well as the opportunities to leverage chosen diversity. The main challenge is that software diversity requires integrating the fact that software must adapt to changes in the requirements and environment -- in all development phases and in unpredictable ways. Yet, exploiting and increasing software diversity is a great opportunity to allow the spontaneous exploration of alternative software solutions and proactively prepare for unforeseen changes. Concretely, we want to provide software engineers with the ability: to characterize an 'envelope' of possible variations; to compose 'envelopes' (to discover new macro envelopes in an opportunistic manner); to dynamically synthesize software inside a given envelope.},
  language = {English},
  affiliation = {DIVERSE - INRIA - IRISA},
  booktitle = {GDR GPL},
  address = {Paris, France},
  audience = {national},
  year = {2014},
  month = jun,
  pdf = {http://hal.inria.fr/hal-00980126/PDF/GDR-challenge.pdf},
  month_numeric = {6}
}
@inproceedings{vacchi:hal-01023864,
  hal_id = {hal-01023864},
  url = {http://hal.inria.fr/hal-01023864},
  title = {Automating Variability Model Inference for Component-Based Language Implementations},
  author = {Vacchi, Edoardo and Cazzola, Walter and Combemale, Benoit and Acher, Mathieu},
  abstract = {Recently, domain-specific language development has again become a topic of interest, as a means to help design solutions to domain-specific problems. Componentized language frameworks, coupled with variability modeling, have the potential to bring language development to the masses, by simplifying the configuration of a new language from an existing set of reusable components. However, designing variability models for this purpose requires not only a good understanding of these frameworks and the way components interact, but also an adequate familiarity with the problem domain. In this paper we propose an approach to automatically infer a relevant variability model from a collection of already implemented language components, given a structured, but general representation of the domain. We describe techniques to assist users in achieving a better understanding of the relationships between language components, and find out which languages can be derived from them with respect to the given domain.},
  keywords = {Variability Models ; SW Product Lines ; DSL Implementation},
  language = {English},
  affiliation = {Dipartimento di Scienze dell'Informazione [Milano] , Dipartimento di Informatica - ISLab , DIVERSE - INRIA - IRISA},
  booktitle = {18th International Software Product Line Conference (SPLC'14)},
  publisher = {ACM},
  address = {Florence, Italy},
  audience = {international},
  year = {2014},
  month = sep,
  pdf = {http://hal.inria.fr/hal-01023864/PDF/splc14-camera.pdf},
  month_numeric = {9}
}
@inproceedings{samih:hal-01025124,
  hal_id = {hal-01025124},
  url = {http://hal.inria.fr/hal-01025124},
  title = {An Approach to Derive Usage Models Variants for Model-based Testing},
  author = {Samih, Hamza and Le Guen, H{\'e}l{\`e}ne and Bogusch, Ralf and Acher, Mathieu and Baudry, Benoit},
  abstract = {Testing techniques in industry are not yet adapted for product line engineering (PLE). In particular, Model-based Testing (MBT), a technique that allows test cases to be automatically generated from requirements, lacks support for managing variability (differences) among a set of related products. In this paper, we present an approach to equip usage models, a widely used formalism in MBT, with variability capabilities. Formal correspondences are established between a variability model, a set of functional requirements, and a usage model. An algorithm then exploits the traceability links to automatically derive a usage model variant from a desired set of selected features. The approach is integrated into the professional MBT tool MaTeLo and is currently used in industry.},
  keywords = {Product Line, Model-based Testing, Usage Model, Usage Model Variant, Orthogonal Variability Model, Requirements},
  language = {English},
  affiliation = {DIVERSE - INRIA - IRISA , ALL4TEC RD [Laval] , Airbus Group [Germany]},
  booktitle = {26th IFIP International Conference on Testing Software and Systems (ICTSS'2014)},
  publisher = {Springer},
  address = {Madrid, Spain},
  audience = {international},
  collaboration = {ALL4TEC, Airbus Defence and Space, INRIA},
  year = {2014},
  month = sep,
  pdf = {http://hal.inria.fr/hal-01025124/PDF/MasterTex.pdf},
  month_numeric = {9}
}
@inproceedings{vanlanduyt:hal-01018938,
  hal_id = {hal-01018938},
  url = {http://hal.inria.fr/hal-01018938},
  title = {Towards Managing Variability in the Safety Design of an Automotive Hall Effect Sensor},
  author = {Van Landuyt, Dimitri and Op De Beeck, Steven and Hovsepyan, Aram and Michiels, Sam and Joosen, Wouter and Meynckens, Sven and De Jong, Gjalt and Barais, Olivier and Acher, Mathieu},
  abstract = {This paper discusses the merits and challenges of adopting software product line engineering (SPLE) as the main development process for an automotive Hall Effect sensor. This versatile component is integrated into a number of automotive applications with varying safety requirements (e.g., windshield wipers and brake pedals). This paper provides a detailed explanation as to why the process of safety assessment and verification of the Hall Effect sensor is currently cumbersome and repetitive: it must be repeated entirely for every automotive application in which the sensor is to be used. In addition, no support is given to the engineer to select and configure the appropriate safety solutions and to explain the safety implications of his decisions. To address these problems, we present a tailored SPLE-based approach that combines model-driven development with advanced model composition techniques for applying and reasoning about specific safety solutions. In addition, we provide insights about how this approach can reduce the overall complexity, improve reusability, and facilitate safety assessment of the Hall Effect sensor.},
  language = {English},
  affiliation = {KU Leuven [Leuven] , Melexis [Ieper] , DIVERSE - INRIA - IRISA},
  booktitle = {18th International Software Product Line Conference (SPLC'14), industrial track},
  address = {Florence, Italy},
  audience = {international},
  year = {2014},
  month = jul,
  pdf = {http://hal.inria.fr/hal-01018938/PDF/splc.pdf},
  month_numeric = {7}
}
@inproceedings{bennasr:hal-01019537,
  hal_id = {hal-01019537},
  url = {http://hal.inria.fr/hal-01019537},
  title = {Moving Toward Product Line Engineering in a Nuclear Industry Consortium},
  author = {Ben Nasr, Sana and Sannier, Nicolas and Acher, Mathieu and Baudry, Benoit},
  abstract = {Nuclear power plants are some of the most sophisticated and complex energy systems ever designed. These systems perform safety critical functions and must conform to national safety institutions and international regulations. In many cases, regulatory documents provide very high level and ambiguous requirements that leave a large margin for interpretation. As the French nuclear industry is now seeking to spread its activities outside France, it is necessary to master the ins and outs of the variability between countries' safety cultures and regulations. This sets both an industrial and a scientific challenge to introduce and propose a product line engineering approach to an unaware industry whose safety culture is made of interpretations, specificities, and exceptions. This paper presents our current work within the French R\&D project CONNEXION, while introducing variability modeling to the French nuclear industry. In particular, we discuss the background, the quest for the best variability paradigm, the practical modeling of requirements variability as well as the mapping between variable requirements and variable architecture elements.},
  language = {English},
  affiliation = {DIVERSE - INRIA - IRISA},
  booktitle = {18th International Software Product Line Conference (SPLC'2014), industrial track},
  address = {Florence, Italy},
  audience = {international},
  year = {2014},
  month = jul,
  pdf = {http://hal.inria.fr/hal-01019537/PDF/Ben-Nasr\_Sannier\_Acher\_Baudry\_Moving\_toward\_PLE\_in\_nuclear\_industry.pdf},
  month_numeric = {7}
}
@inproceedings{acher:hal-01020933,
  hal_id = {hal-01020933},
  url = {http://hal.inria.fr/hal-01020933},
  title = {ViViD: A Variability-Based Tool for Synthesizing Video Sequences},
  author = {Acher, Mathieu and Alferez, Mauricio and Galindo, Jos{\'e} A. and Romenteau, Pierre and Baudry, Benoit},
  abstract = {We present ViViD, a variability-based tool to synthesize variants of video sequences. ViViD is developed and used in the context of an industrial project involving consumers and providers of video processing algorithms. The goal is to synthesize synthetic video variants with a wide range of characteristics to then test the algorithms. We describe the key components of ViViD: (1) a variability language and an environment to model what can vary within a video sequence; (2) a reasoning back-end to generate relevant testing configurations; (3) a video synthesizer in charge of producing variants of video sequences corresponding to configurations. We show how ViViD can synthesize realistic videos with different characteristics such as luminances, vehicles and persons that cover a diversity of testing scenarios.},
  language = {English},
  affiliation = {DIVERSE - INRIA - IRISA , InPixal [Rennes]},
  booktitle = {18th International Software Product Line Conference (SPLC'14), tool track},
  address = {Florence, Italy},
  audience = {international},
  year = {2014},
  pdf = {http://hal.inria.fr/hal-01020933/PDF/paper.pdf}
}
@inproceedings{becan:hal-01022912,
  hal_id = {hal-01022912},
  url = {http://hal.inria.fr/hal-01022912},
  title = {WebFML: Synthesizing Feature Models Everywhere},
  author = {B{\'e}can, Guillaume and Ben Nasr, Sana and Acher, Mathieu and Baudry, Benoit},
  abstract = {Feature Models (FMs) are the de-facto standard for documenting, model checking, and reasoning about the configurations of a software system. This paper introduces WebFML, a comprehensive environment for synthesizing FMs from various kinds of artefacts (e.g. propositional formula, dependency graph, FMs or product comparison matrices). A key feature of WebFML is its interactive support (through ranking lists, clusters, and logical heuristics) for choosing a sound and meaningful hierarchy. WebFML opens avenues for numerous practical applications (e.g., merging multiple product lines, slicing a configuration process, reverse engineering configurable systems).},
  keywords = {Ontologic-Aware Synthesis, Feature Modeling Environment, Reverse Engineering Feature Models},
  language = {English},
  affiliation = {DIVERSE - INRIA - IRISA},
  booktitle = {18th International Software Product Line Conference (SPLC'14), tool track},
  address = {Florence, Italy},
  audience = {international},
  year = {2014},
  month = sep,
  slides = {http://fr.slideshare.net/acher/webfml-synthesizing-feature-models-everywhere-splc-2014},
  pdf = {http://hal.inria.fr/hal-01022912/PDF/SPLC2014-WebFML.pdf},
  month_numeric = {9}
}
@inproceedings{acher:hal-01024990,
  hal_id = {hal-01024990},
  url = {http://hal.inria.fr/hal-01024990},
  title = {SPLTea 2014: First International Workshop on Software Product Line Teaching},
  author = {Acher, Mathieu and Lopez-Herrejon, Roberto Erick and Rabiser, Rick},
  abstract = {Education has a key role to play for disseminating the constantly growing body of Software Product Line (SPL) knowledge. Teaching SPLs is challenging, and it is unclear how SPLs can be taught, what the possible benefits are, or what material is available. This workshop aims to explore and explain the current status and ongoing work on teaching SPLs at universities, colleges, and in industry (e.g., by consultants). Participants will discuss gaps and difficulties faced when teaching SPLs, benefits to research and industry, different ways to teach SPL knowledge, common threads, interests, and problems. The overall goal is to strengthen the important aspect of teaching in the SPL community.},
  keywords = {teaching; software product lines; configurator},
  language = {English},
  affiliation = {DIVERSE - INRIA - IRISA , Johannes Kepler University Linz [linz] - JKU},
  booktitle = {18th International Software Product Line Conference (SPLC'14)},
  address = {Florence, Italy},
  audience = {international},
  year = {2014},
  month = jul,
  pdf = {http://hal.inria.fr/hal-01024990/PDF/spltea\_summary.pdf},
  month_numeric = {7}
}
@inproceedings{samih2014,
  hal_id = {hal-01002099},
  url = {http://hal.inria.fr/hal-01002099},
  title = {Deriving Usage Model Variants for Model-based Testing: An Industrial Case Study},
  author = {Samih, Hamza and Acher, Mathieu and Bogusch, Ralf and Le Guen, H{\'e}l{\`e}ne and Baudry, Benoit},
  abstract = {The strong cost pressure of the market and the safety issues faced by the aerospace industry affect development. Suppliers are forced to continuously optimize their life-cycle processes to facilitate the development of variants for different customers and shorten time to market. Additionally, industrial safety standards like RTCA/DO-178C require high efforts for testing single products. A suitably organized test process for Product Lines (PL) can meet such standards. In this paper, we propose an approach that adopts Model-based Testing (MBT) for PL. Usage models, a widely used MBT formalism that provides automatic test case generation capabilities, are equipped with variability information such that usage model variants can be derived for a given set of features. The approach is integrated in the professional MBT tool MaTeLo. We report on our experience gained from an industrial case study in the aerospace domain.},
  keywords = {Product Line, Model-based Testing, Usage Model, Orthogonal Variability Model, Requirements},
  language = {English},
  affiliation = {DIVERSE - INRIA - IRISA , ALL4TEC RD , Airbus Group [Germany] , ARMOR - INRIA - IRISA , TRISKELL - INRIA - IRISA},
  booktitle = {19th International Conference on Engineering of Complex Computer Systems (ICECCS'14)},
  address = {Tianjin, China},
  audience = {international},
  collaboration = {ALL4TEC, Airbus Defence and Space, INRIA},
  year = {2014},
  month = aug,
  pdf = {http://hal.inria.fr/hal-01002099/PDF/Deriving\_Usage\_Model\_Variants\_for\_Model\_based\_Testing\_An\_Industrial\_Case\_Study.pdf},
  month_numeric = {8}
}
@inbook{acherm2014book,
  chapter = {Software Architectures and Multiple Variability},
  title = {Software Architecture 2},
  publisher = {Wiley},
  year = {2014},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe},
  hal_id = {hal-01098107},
  abstract = { During the construction of software product lines, variability management is a crucial activity. A large number of software variants must be produced, in most cases, by using extensible architectures. In this chapter, we present the various applications of a set of modular variability management tools (FAMILIAR) for different forms of architecture (component-, service- and plug-in-based), and at different stages of the software life cycle. We discuss the lessons learnt from these studies and present guidelines for resolving recurring problems linked to multiple variability and to software architecture. },
  important = {1}
}
@inproceedings{galindoISSTA14,
  author = {Galindo, Jos\'{e} A. and Alferez, Mauricio and Acher, Mathieu and Baudry, Benoit and Benavides, David},
  title = {A Variability-based Testing Approach for Synthesizing Video Sequences},
  booktitle = {International Symposium on Software Testing and Analysis (ISSTA'14)},
  year = {2014},
  important = {1},
  hal_id = {hal-01003148},
  ar = {28\%},
  abstract = { A key problem when developing video processing software is the difficulty to test different input combinations. In this paper, we present VANE, a variability-based testing approach to derive video sequence variants. The ideas of the approach are i) to encode what can vary within a video sequence in a variability model; ii) exploit the variability model to generate a certain number of testable configurations; iii) synthesize variants of video sequences corresponding to configurations. VANE computes T-wise covering sets while maximizing a function over quality attributes. Also, we present a preliminary validation of the VANE approach in the context of an industrial project involving the test of video processing algorithms. }
}
@inproceedings{sannier:hal-00927312,
  hal_id = {hal-00927312},
  title = {Comparing or Configuring Products: Are We Getting the Right Ones?},
  author = {Sannier, Nicolas and Bécan, Guillaume and Acher, Mathieu and Ben Nasr, Sana and Baudry, Benoit},
  abstract = {Product comparators and configurators aim to assist customers in choosing a product that meets their expectations. While comparators present similarities and differences between competing products, configurators propose an assisted environment to gradually choose and customize products. The two systems have pros and cons and are inherently different. But both share the same variability information background and operate over a set of (possible) products, typically represented through product comparison matrices (PCMs). A key issue is that current PCMs have no clear semantics, making their analysis and transformations imprecise and hard. In this paper, we sketch a research plan for generating dedicated comparators or configurators from PCMs. The core of our vision is the use of formal variability models to encode PCMs and enable their further exploitation by developers of comparators or configurators. We elaborate on five research questions and describe the expected outputs of the research.},
  keywords = {configuration, configurator, comparator, variability, models, product comparison matrices, software product line engineering},
  language = {English},
  affiliation = {DIVERSE - INRIA - IRISA},
  booktitle = {8th International Workshop on Variability Modelling of Software-intensive Systems (VaMoS'14)},
  publisher = {ACM},
  address = {Nice, France},
  audience = {international},
  year = {2014},
  ar = {38\%},
  slides = {http://fr.slideshare.net/gbecan/vamos2014},
  workshop = {1},
  month = jan,
  month_numeric = {1}
}
@inproceedings{fahrenberg:hal-00927310,
  hal_id = {hal-00927310},
  url = {http://hal.inria.fr/hal-00927310},
  title = {Sound Merging and Differencing for Class Diagrams},
  author = {Fahrenberg, Uli and Acher, Mathieu and Legay, Axel and Wasowski, Andrzej},
  abstract = {Class diagrams are among the most popular modeling languages in industrial use. In a model-driven development process, class diagrams evolve, so it is important to be able to assess differences between revisions, as well as to propagate differences using suitable merge operations. Existing differencing and merging methods are mainly syntactic, concentrating on edit operations applied to model elements, or they are based on sampling: enumerating some examples of instances which characterize the difference between two diagrams. This paper presents the first known (to the best of our knowledge) automatic model merging and differencing operators supported by a formal semantic theory guaranteeing that they are semantically sound. All instances of the merge of a model and its difference with another model are automatically instances of the second model. The differences we synthesize are represented using class diagram notation (not edits, or instances), which allows creation of a simple yet flexible algebra for diffing and merging. It also allows presenting changes comprehensively, in a notation already known to users.},
  keywords = {model-driven engineering, model composition, model merging, model differencing, algebra, model synthesis},
  language = {English},
  affiliation = {DISTRIBCOM - INRIA - IRISA , DIVERSE - INRIA - IRISA , IT University of Copenhagen - IT},
  booktitle = {17th International Conference on Fundamental Approaches to Software Engineering (FASE'14)},
  publisher = {Springer},
  address = {Grenoble, France},
  series = {LNCS},
  audience = {international},
  year = {2014},
  ar = {23\%},
  month = apr,
  important = {1},
  month_numeric = {4}
}
@techreport{becan:hal-00874867,
  hal_id = {hal-00874867},
  url = {http://hal.inria.fr/hal-00874867},
  title = {Breathing Ontological Knowledge Into Feature Model Management},
  author = {Bécan, Guillaume and Acher, Mathieu and Baudry, Benoit and Ben Nasr, Sana},
  abstract = {Feature Models (FMs) are a popular formalism for modeling and reasoning about the configurations of a software product line. As the manual construction or management of an FM is time-consuming and error-prone for large software projects, recent works have focused on automated operations for reverse engineering or refactoring FMs from a set of configurations/dependencies. Without prior knowledge, meaningless ontological relations (as defined by the feature hierarchy and groups) are likely to be synthesized and cause severe difficulties when reading, maintaining or exploiting the resulting FM. In this paper we define a generic, ontological-aware synthesis procedure that guides users when identifying the likely siblings or parent candidates for a given feature. We develop and evaluate a series of heuristics for clustering/weighting the logical, syntactic and semantic relationships between features. Empirical experiments on hundreds of FMs, coming from the SPLOT repository and Wikipedia, show that a hybrid approach mixing logical and ontological techniques outperforms state-of-the-art solutions and offers the best support for reducing the number of features a user has to consider during the interactive selection of a hierarchy.},
  keywords = {feature models, model synthesis, model management, software product lines, software engineering, configuration management, merging, slicing, refactoring, reverse engineering},
  language = {English},
  affiliation = {TRISKELL - INRIA - IRISA},
  pages = {15},
  type = {Technical Report},
  institution = {INRIA},
  number = {RT-0441},
  year = {2013},
  month = oct,
  other = {1},
  pdf = {http://hal.inria.fr/hal-00874867/PDF/RT-441.pdf},
  month_numeric = {10}
}
@inbook{acherm2014bookb,
  chapter = {Architectures logicielles et variabilité multiple},
  title = {Architectures logicielles et variabilité multiple},
  publisher = {Lavoisier},
  hal_id = {hal-01098109},
  year = {2014},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe},
  abstract = { When building software product lines, variability management is a crucial activity. A very large number of software variants must be produced, most often by using extensible architectures. In this chapter, we present several applications of a modular variability management toolset (FAMILIAR) to different forms of architectures (component-, service- and plugin-based) and at different stages of the software life cycle. We discuss the lessons learnt from these studies, as well as some open problems related to variability and software architectures. },
}
@inproceedings{khalilabbasi2014,
  title = {Reverse Engineering Web Configurators},
  hal_id = {hal-00913139},
  author = {Khalil Abbasi, Ebrahim and Acher, Mathieu and Heymans, Patrick and Cleve, Anthony},
  abstract = {A Web configurator offers a highly interactive environment to assist users in customising sales products through the selection of configuration options. Our previous empirical study revealed that a significant number of configurators are suboptimal in reliability, efficiency, and maintainability, opening avenues for re-engineering support and methodologies. This paper presents a tool-supported reverse-engineering process to semi-automatically extract configuration-specific data from a legacy Web configurator. The extracted and structured data is stored in formal models (e.g., variability models) and can be used in a forward-engineering process to generate a customized interface with an underlying reliable reasoning engine. Two major components are presented: (1) a Web Wrapper that extracts structured configuration-specific data from unstructured or semi-structured Web pages of a configurator, and (2) a Web Crawler that explores the configuration space (i.e., all objects representing configuration-specific data) and simulates users' configuration actions. We describe variability data extraction patterns, used on top of the Wrapper and the Crawler to extract configuration data. Experimental results on five existing Web configurators show that the specification of a few variability patterns enables the identification of hundreds of options.},
  booktitle = {17th European Conference on Software Maintenance and Reengineering (CSMR'14)},
  address = {Antwerp, Belgium},
  audience = {international},
  year = {2014},
  ar = {31\%},
  month = feb,
  month_numeric = {2}
}
@inproceedings{acher:hal-00916746,
  hal_id = {hal-00916746},
  url = {http://hal.inria.fr/hal-00916746},
  title = {A Survey on Teaching of Software Product Lines},
  author = {Acher, Mathieu and Lopez-Herrejon, Roberto Erick and Rabiser, Rick},
  abstract = {With around two decades of existence, the community of Software Product Line (SPL) researchers and practitioners is thriving as can be attested by the extensive research output and the numerous successful industrial projects. Education has a key role to support the next generation of engineers to build highly complex SPLs. Yet, it is unclear how SPLs are taught, what gaps and difficulties are faced, what the benefits are, or what material is available. In this paper, we carry out a survey with over 30 respondents with the purpose of capturing a snapshot of the state of teaching in our community. We report and discuss quantitative as well as qualitative results of the survey. We build upon them and sketch six concrete actions to continue improving the state of practice of SPL teaching.},
  keywords = {teaching ; software engineering ; software product lines ; variability ;},
  language = {English},
  affiliation = {TRISKELL - INRIA - IRISA , Johannes Kepler University Linz [linz] - JKU},
  booktitle = {Eighth International Workshop on Variability Modelling of Software-Intensive Systems (VaMoS'14)},
  publisher = {ACM},
  address = {Nice, France},
  audience = {international},
  year = {2014},
  workshop = {1},
  slides = {http://fr.slideshare.net/acher/teaching-30301536},
  month = jan,
  month_numeric = {1}
}
@inproceedings{sannier2013,
  hal_id = {hal-00858491},
  url = {http://hal.inria.fr/hal-00858491},
  title = {From Comparison Matrix to Variability Model: The Wikipedia Case Study},
  author = {Sannier, Nicolas and Acher, Mathieu and Baudry, Benoit},
  abstract = {Product comparison matrices (PCMs) provide a convenient way to document the discriminant features of a family of related products and now abound on the internet. Despite their apparent simplicity, the information present in existing PCMs can be very heterogeneous, partial, ambiguous, and hard to exploit by users who want to choose an appropriate product. Variability Models (VMs) can be employed to formulate in a more precise way the semantics of PCMs and enable automated reasoning such as assisted configuration. Yet, the gap between PCMs and VMs should be precisely understood and automated techniques should support the transition between the two. In this paper, we propose variability patterns that describe the content of PCMs and conduct an empirical analysis of 300+ PCMs mined from Wikipedia. Our findings are a first step toward better engineering techniques for maintaining and configuring PCMs.},
  keywords = {Product comparison Matrices, variability models, variability patterns},
  language = {English},
  affiliation = {TRISKELL - INRIA - IRISA},
  booktitle = {28th IEEE/ACM International Conference on Automated Software Engineering (ASE'13)},
  address = {Palo Alto, United States},
  ar = {23\%},
  audience = {international},
  year = {2013},
  pdf = {http://hal.inria.fr/hal-00858491/PDF/ASE2013\_Sannier\_Acher\_Baudry.pdf},
  important = {1},
  slides = {http://fr.slideshare.net/acher/product-comparison-matrix-pcm-and-wikipedia}
}
@inproceedings{marianela2013,
  title = {Interactive Visualisation of Products in Online Configurators: A Case Study for Variability Modelling Technologies},
  author = {Felice, Marianela Ciolfi and Filho, Joao Bosco Ferreira and Acher, Mathieu and Blouin, Arnaud and Barais, Olivier},
  booktitle = {MAPLE/SCALE 2013 -- Joint Workshop of MAPLE 2013 (5th International Workshop on Model-driven Approaches in Software Product Line Engineering) and SCALE 2013 (4th Workshop on Scalable Modeling Techniques for Software Product Lines), at SPLC 2013},
  year = {2013},
  hal_id = {hal-00842656},
  workshop = {1},
  abstract = { Numerous companies develop interactive environments to assist users in customising sales products through the selection of configuration options. A visual representation of these products is an important factor in terms of user experience. However, an analysis of 60+ existing configurators highlights that not all provide visual representations of configured products and few of them do it in a user-friendly manner. One of the current challenges is the trade-off developers face between either the memory-consuming use of pre-generated images of all the combinations of options, or rendering products on the fly, which is non-trivial to implement efficiently. We believe that a new approach to associate product configurations to visual representations is needed to compose and render them dynamically. In this paper we present a formal statement of the problem and a model-driven perspective for addressing it as well as ongoing and future work. We argue the case study can serve as an empirical method to explore the adequacy of variability (modelling) technologies. },
}
@inproceedings{acherMODELS13,
  title = {Composing your Compositions of Variability Models},
  author = {Acher, Mathieu and Combemale, Benoit and Collet, Philippe and Barais, Olivier and Lahire, Philippe and France, Robert B.},
  booktitle = {ACM/IEEE 16th International Conference on Model Driven Engineering Languages and Systems (MODELS'13)},
  year = {2013},
  ar = {23\%},
  hal_id = {hal-00859473},
  abstract = { Modeling and managing variability is a key activity in a growing number of software engineering contexts. Support for composing variability models is arising in many engineering scenarios, for instance, when several subsystems or modeling artifacts, each coming with their own variability and possibly developed by different stakeholders, should be combined together. In this paper, we consider the problem of composing feature models (FMs), a widely used formalism for representing and reasoning about a set of variability choices. We show that several composition operators can actually be defined, depending on both matching/merging strategies and semantic properties expected in the composed FM. We present four alternative forms and their implementations. We discuss their relative trade-offs w.r.t. reasoning, customizability, traceability, composability and quality of the resulting feature diagram. We summarize these findings in a reading grid which is validated by revisiting some relevant existing works. Our contribution should assist developers in choosing and implementing the right composition operators. },
  important = {1}
}
@inproceedings{collet:hal-00913157,
  hal_id = {hal-00913157},
  url = {http://hal.inria.fr/hal-00913157},
  title = {Feature Model Management: Smart Operations and Language Support (tutorial)},
  author = {Collet, Philippe and Lahire, Philippe and Acher, Mathieu and France, Robert},
  abstract = {Variability modelling and management is pervasive in a growing number of software engineering contexts (e.g., software product lines, dynamic adaptive systems). Feature models are the de facto standard to formally represent and reason about commonality and variability of a software system. This tutorial aims at presenting feature modelling languages and tools, directly applicable to a wide range of model-based variability problems and application domains. We will explain how to import, export, compose, decompose, edit, configure, compute diffs, refactor, reverse engineer, test, or reason about (multiple) feature models. We will also illustrate how these "smart" operations can be combined to realize complex variability management tasks. Participants (being practitioners or academics, beginners or advanced) will learn the principles and foundations of tool-supported techniques dedicated to the model-based management of variability.},
  keywords = {variability ; software product lines ; feature model ; slicing ; merging ; reverse engineering},
  language = {English},
  affiliation = {Laboratoire d'Informatique, Signaux, et Syst{\`e}mes de Sophia-Antipolis (I3S) / Equipe MODALIS , TRISKELL - INRIA - IRISA , Colorado State University - CSU},
  booktitle = {ACM/IEEE 16th International Conference on Model Driven Engineering Languages and Systems (MODELS'13)},
  address = {Miami, United States},
  audience = {international},
  year = {2013},
  tutorial = {1},
  month = sep,
  month_numeric = {9}
}
@inproceedings{FSE13,
  title = {Feature Model Extraction from Large Collections of Informal Product Descriptions},
  author = {Davril, Jean-Marc and Delfosse, Edouard and Hariri, Negar and Acher, Mathieu and Cleland-Huang, Jane and Heymans, Patrick},
  booktitle = {European Software Engineering Conference and the ACM SIGSOFT Symposium on the Foundations of Software Engineering (ESEC/FSE'13)},
  year = {2013},
  ar = {20\%},
  hal_id = {hal-00859475},
  pages = {290--300},
  important = {1},
  abstract = { Feature Models (FMs) are used extensively in software product line engineering to help generate and validate individual product configurations and to provide support for domain analysis. As FM construction can be tedious and time-consuming, researchers have previously developed techniques for extracting FMs from sets of formally specified individual configurations, or from software requirements specifications for families of existing products.  However, such artifacts are often not available. In this paper we present a novel, automated approach for constructing FMs from publicly available product descriptions found in online product repositories and marketing websites such as SoftPedia and CNET.  While each individual product description provides only a partial view of features in the domain, a large set of descriptions can provide fairly comprehensive coverage. Our approach utilizes hundreds of partial product descriptions to construct an FM and is described and evaluated against antivirus product descriptions mined from SoftPedia. }
}
@inproceedings{bosco2013,
  title = {Generating Counterexamples of Model-based Software Product Lines: An Exploratory Study},
  author = {Filho, João Bosco Ferreira and Barais, Olivier and Acher, Mathieu and Le Noir, Jérôme and Baudry, Benoit},
  booktitle = {17th International Conference on Software Product Lines (SPLC'13)},
  year = {2013},
  abstract = { Model-based Software Product Line (MSPL) engineering aims at deriving customized models corresponding to individual products of a family. MSPL approaches usually promote the joint use of a variability model, a base model expressed in a specific formalism, and a realization layer that maps variation points to model elements. The design space of an MSPL is extremely complex to manage for the engineer, since the number of variants may be exponential and the derived product models have to be conformant to numerous well-formedness and business rules. In this paper, the objective is to provide a way to generate MSPLs, called counterexamples, that can produce invalid product models despite a valid configuration in the variability model. We provide a systematic and automated process, based on the Common Variability Language (CVL), to randomly search the space of MSPLs for a specific formalism. We validate the effectiveness of this process for three formalisms at different scales (up to 247 metaclasses and 684 rules). We also explore and discuss how counterexamples could guide practitioners when customizing derivation engines, when implementing checking rules that prevent early incorrect CVL models, or simply when specifying an MSPL. },
  hal_id = {hal-00837523},
  ar = {33\%},
  note = {Best Student Paper Award},
  important = {1}
}
@inproceedings{acherEC2013,
  title = {Model-Based Variability Management (tutorial)},
  author = {Acher, Mathieu and Combemale, Benoit and Barais, Olivier},
  year = {2013},
  booktitle = {Three co-located international conferences ECOOP'13, ECMFA'13 and ECSA'13},
  series = {{}},
  address = {Montpellier, France},
  month = jul,
  days = {july 3},
  organization = {},
  publisher = {},
  pages = {},
  lang = {english},
  isbn = {},
  tutorial = {1},
  url = {http://www.lirmm.fr/ecmfa13/?id=158#tutorial10},
  sorte = {colin},
  slides = {http://fr.slideshare.net/acher/ec2013-tutorialmb-variabilityfinal},
  abstract = { The customization of almost everything is observed in a wide range of domains. Many organizations should address the challenge of extending, changing, customizing or configuring numerous kinds of systems and artefacts (requirements, components, services, languages, architectural or design models, codes, user interfaces, etc.) for use in a particular context. As a result, modeling and managing variability of such systems and artefacts is a crucial activity in a growing number of software engineering contexts (e.g., software product lines, dynamic adaptive architectures). Numerous model-based techniques have been proposed and usually consist in i) a variability model (e.g., a feature model), ii) a model (e.g., a class diagram) expressed in a domain-specific modeling language (e.g., Unified Modelling language), and iii) a realization layer that maps and transforms variation points into model elements. Based on a selection of desired features in the variability model, a derivation engine can automatically synthesise customized models – each model corresponding to an individual product. In this tutorial, we present the foundations and tool-supported techniques of state-of-the-art variability modeling technologies. In the first part, we briefly exemplify the management of variability in some systems/artefacts (design models, languages, product configurators). We introduce the Common Variability Language (CVL), a representative approach and ongoing effort involving both academic and industry partners to promote the standardization of variability modeling technology. In the second part, we focus on feature models, the most popular notation to formally represent and reason about commonality and variability of a software system. Feature modelling languages and tools, directly applicable to a wide range of model-based variability problems and application domains, are presented. The FAMILIAR language and environment is used to perform numerous management operations like the import, export, compose, decompose, edit, configure, compute diffs, refactor, reverse engineer, test, or reason about (multiple) feature models. We describe their theoretical foundations, efficient implementations, and how these operations can be combined to realize complex variability management tasks. In the third part, we show how to combine feature models and other modeling artefacts. We revisit the examples given in the first part of the tutorial, using the Kermeta workbench and familiarCVL, an implementation of CVL. Finally we present some of the ongoing challenges for variability modeling. At the end of the tutorial, participants (being practitioners or academics, beginners or advanced) will learn languages, tools and novel variability modeling techniques they can directly use in their industrial contexts or as part of their research. },
  month_numeric = {7}
}
@article{acher-cleve-etal:2013,
  title = {Extraction and Evolution of Architectural Variability Models in Plugin-based Systems},
  author = {Acher, Mathieu and Cleve, Anthony and Collet, Philippe and Merle, Philippe and Duchien, Laurence and Lahire, Philippe},
  year = {2013},
  hal_id = {hal-00859472},
  journal = {Software and Systems Modeling (SoSyM)},
  important = {1},
  abstract = { Variability management is a key issue when building and evolving software-intensive systems, making it possible to extend, configure, customize and adapt such systems to customers' needs and specific deployment contexts. A wide form of variability can be found in extensible software systems, typically built on top of plugin-based architectures that offer a (large) number of configuration options through plugins. In an ideal world, a software architect should be able to generate a system variant on-demand, corresponding to a particular assembly of plugins. To this end, the variation points and constraints between architectural elements should be properly modeled and maintained over time (i.e., for each version of an architecture). A crucial, yet error-prone and time-consuming, task for a software architect is to build an accurate representation of the variability of an architecture, in order to prevent unsafe architectural variants and reach the highest possible level of flexibility. In this article, we propose a reverse engineering process for producing a variability model (i.e., a feature model) of a plugin-based architecture. We develop automated techniques to extract and combine different variability descriptions, including a hierarchical software architecture model, a plugin dependency model and the software architect knowledge. By computing and reasoning about differences between versions of architectural feature models, the software architect can control both the variability extraction and evolution processes. The proposed approach has been applied to a representative, large-scale plugin-based system (FraSCAti), considering different versions of its architecture. We report on our experience in this context. },
}
@inproceedings{khalilabbasi:hal-00796555,
  hal_id = {hal-00796555},
  url = {http://hal.inria.fr/hal-00796555},
  title = {The Anatomy of a Sales Configurator: An Empirical Study of 111 Cases},
  author = {Khalil Abbasi, Ebrahim and Hubaux, Arnaud and Acher, Mathieu and Boucher, Quentin and Heymans, Patrick},
  abstract = {Nowadays, mass customization has been embraced by a large portion of the industry. As a result, the web abounds with sales configurators that help customers tailor all kinds of goods and services to their specific needs. In many cases, configurators have become the single entry point for placing customer orders. As such, they are strategic components of companies' information systems and must meet stringent reliability, usability and evolvability requirements. However, the state of the art lacks guidelines and tools for efficiently engineering web sales configurators. To tackle this problem, empirical data on current practice is required. The first part of this paper reports on a systematic study of 111 web sales configurators along three essential dimensions: rendering of configuration options, constraint handling, and configuration process support. Based on this, the second part highlights good and bad practices in engineering web sales configurators. The reported quantitative and qualitative results open avenues for the elaboration of methodologies to (re-)engineer web sales configurators.},
  language = {English},
  affiliation = {PReCISE Research Centre in Information Systems Engineering - PReCISE , TRISKELL - INRIA - IRISA},
  booktitle = {25th International Conference on Advanced Information Systems Engineering (CAiSE'13)},
  address = {Valencia, Spain},
  audience = {international},
  year = {2013},
  ar = {16.6\%},
  month = jun,
  important = {1},
  month_numeric = {6}
}
@inproceedings{acher:hal-00766786,
  hal_id = {hal-00766786},
  title = {Support for Reverse Engineering and Maintaining Feature Models},
  author = {Acher, Mathieu and Heymans, Patrick and Cleve, Anthony and Hainaut, Jean-Luc and Baudry, Benoit},
  abstract = {Feature Models (FMs) are a popular formalism for modelling and reasoning about commonality and variability of a system. In essence, FMs aim to define a set of valid combinations of features, also called configurations. In this paper, we tackle the problem of synthesising an FM from a set of configurations. The main challenge is that numerous candidate FMs can be extracted from the same input configurations, yet only a few of them are meaningful and maintainable. We first characterise the different meanings of FMs and identify the key properties that allow discriminating between them. We then develop a generic synthesis procedure capable of restituting the intended meanings of FMs based on inferred or user-specified knowledge. Using tool support, we show how the integration of knowledge into FM synthesis can be realized in different practical application scenarios that involve reverse engineering and maintaining FMs.},
  keywords = {variability; reverse engineering ; model synthesis ; software product line ; model management ; maintenance},
  language = {English},
  affiliation = {TRISKELL - INRIA - IRISA , PReCISE Research Centre in Information Systems Engineering - PReCISE , ADAM - INRIA Lille - Nord Europe , Universit{\'e} Lille 1 , Laboratoire d'Informatique Fondamentale de Lille - LIFL},
  booktitle = {Seventh International Workshop on Variability Modelling of Software-Intensive Systems (VaMoS'13)},
  publisher = {ACM},
  address = {Pisa, Italy},
  audience = {international},
  year = {2013},
  ar = {42\%},
  workshop = {1},
  month = jan,
  month_numeric = {1}
}
@article{acher-collet-etal:2013,
  title = {FAMILIAR: A Domain-Specific Language for Large Scale Management of Feature Models},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and France, Robert},
  year = {2013},
  hal_id = {hal-00767175},
  journal = {Science of Computer Programming (SCP) Special issue on programming languages},
  pages = {55},
  lang = {english},
  isbn = {},
  doi = {10.1016/j.scico.2012.12.004},
  url = {},
  important = {1},
  sorte = {revue int},
  abstract = { The feature model formalism has become the de facto standard for managing variability in software product lines (SPLs). In practice, developing an SPL can involve modeling a large number of features representing different viewpoints, sub-systems or concerns of the software system. This activity is generally tedious and error-prone. In this article, we present FAMILIAR, a Domain-Specific Language (DSL) that is dedicated to the large scale management of feature models and that complements existing tool support. The language provides a powerful support for separating concerns in feature modeling, through the provision of composition and decomposition operators, reasoning facilities and scripting capabilities with modularization mechanisms. We illustrate how an SPL consisting of medical imaging services can be practically managed using reusable FAMILIAR scripts that implement reasoning mechanisms. We also report on various usages and applications of FAMILIAR and its operators, to demonstrate their applicability to different domains and use for different purposes. },
}
@inbook{hubaux-acher-etal:2013,
  chapter = {Separating Concerns in Feature Models: Retrospective and Multi-View Support},
  title = {Domain Engineering: Product Lines, Conceptual Models, and Languages (editors: Reinhartz-Berger,I. and Sturm, A. and Clark, T. and Bettin, J. and Cohen, S.)},
  publisher = {Springer},
  year = {2013},
  author = {Hubaux, Arnaud and Acher, Mathieu and Tun, T. T. and Heymans, Patrick and Collet, Philippe and Lahire, Philippe},
  hal_id = {hal-00767213},
  abstract = { Feature models (FMs) are a popular formalism to describe the commonality and variability of a set of assets in a software product line (SPL). SPLs usually involve large and complex FMs that describe thousands of features whose legal combinations are governed by many and often complex rules. The size and complexity of these models is partly explained by the large number of concerns considered by SPL practitioners when managing and configuring FMs. In this chapter, we first survey concerns and their separation in FMs, highlighting the need for more modular and scalable techniques. We then revisit the concept of view as a simplified representation of an FM. We finally describe a set of techniques to specify, visualize and verify the coverage of a set of views. These techniques are implemented in complementary tools providing practical support for feature-based configuration and large scale management of FMs. },
  important = {1}
}
@inproceedings{acher-heymans-etal:2012,
  title = {Feature Model Differences},
  hal_id = {hal-00713849},
  author = {Acher, Mathieu and Heymans, Patrick and Collet, Philippe and Quinton, Cl{\'e}ment and Lahire, Philippe and Merle, Philippe},
  year = {2012},
  booktitle = {24th International Conference on Advanced Information Systems Engineering (CAiSE'12)},
  series = {{LNCS}},
  address = {},
  month = jun,
  days = {25-29 June},
  organization = {},
  publisher = {Springer},
  pages = {},
  important = {1},
  lang = {english},
  isbn = {},
  ar = {14\%},
  url = {https://nyx.unice.fr/publis/acher-heymans-etal:2012.pdf},
  sorte = {colin},
  abstract = {Feature models are a widespread means to represent commonality and variability in software product lines. As is the case for other kinds of models, computing and managing feature model differences is useful in various real-world situations. In this paper, we propose a set of novel differencing techniques that combine syntactic and semantic mechanisms, and automatically produce meaningful differences. Practitioners can exploit our results in various ways: to understand, manipulate, visualize and reason about differences. They can also combine them with existing feature model composition and decomposition operators. The proposed automations rely on satisfiability algorithms. They come with a dedicated language and a comprehensive environment. We illustrate and evaluate the practical usage of our techniques through a case study dealing with a configurable component framework.},
  month_numeric = {6}
}
@inproceedings{acher-collet-etal:2012,
  title = {Separation of Concerns in Feature Modeling: Support and Applications},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and France, Robert},
  year = {2012},
  booktitle = {Aspect-Oriented Software Development (AOSD'12)},
  series = {{}},
  address = {},
  month = mar,
  important = {1},
  organization = {},
  publisher = {ACM},
  pages = {},
  hal_id = {hal-00767423},
  lang = {english},
  isbn = {},
  url = {http://hal.inria.fr/docs/00/76/74/23/PDF/acher-collet-etal-2012.pdf},
  ar = {25\%},
  sorte = {colin},
  abstract = {Feature models (FMs) are a popular formalism for describing the commonality and variability of software product lines (SPLs) in terms of features. SPL development increasingly involves manipulating many large FMs, and thus scalable modular techniques that support compositional development of complex SPLs are required. In this paper, we describe how a set of complementary operators (aggregate, merge, slice) provides practical support for separation of concerns in feature modeling. We show how the combination of these operators can assist in tedious and error prone tasks such as automated correction of FM anomalies, update and extraction of FM views, reconciliation of FMs and reasoning about properties of FMs. For each task, we report on practical applications in different domains. We also present a technique that can efficiently decompose FMs with thousands of features and report our experimental results.},
  month_numeric = {3}
}
@article{acher-collet-etal:2011g,
  title = {Composing Multiple Variability Artifacts to Assemble Coherent Workflows},
  author = {Acher, Mathieu and Collet, Philippe and Gaignard, Alban and Lahire, Philippe and Montagnat, Johan and France, Robert},
  month = sep,
  year = {2012},
  important = {1},
  journal = {Software Quality Journal Special issue on Quality Engineering for Software Product Lines},
  volume = {20},
  number = {{3-4}},
  hal_id = {hal-00733556},
  pages = {689--734},
  lang = {english},
  isbn = {},
  url = {http://hal.archives-ouvertes.fr/hal-00733556/PDF},
  sorte = {revue int},
  abstract = { The development of scientific workflows is evolving towards the systematic use of service-oriented architectures, enabling the composition of dedicated and highly parameterized software services into processing pipelines. Building consistent workflows then becomes a cumbersome and error-prone activity as users cannot manage such large-scale variability. This paper presents a rigorous and tooled approach in which techniques from Software Product Line (SPL) engineering are reused and extended to manage variability in service and workflow descriptions. Composition can be facilitated while ensuring consistency. Services are organized in a rich catalog, which is itself structured as an SPL according to the common and variable concerns captured for all services. By relying on sound merging techniques on the feature models that make up the catalog, reasoning about the compatibility between connected services is made possible. Moreover, an entire workflow is then seen as a multiple SPL (i.e., a composition of several SPLs). When services are configured within, the propagation of variability choices is then automated with appropriate techniques and the user is assisted in obtaining a consistent workflow. The approach proposed is completely supported by a combination of dedicated tools and languages. Illustrations and experimental validations are provided using medical imaging pipelines, which are representative of current scientific workflows in many domains.},
  month_numeric = {9}
}
@inproceedings{acher-michel-etal:2012a,
  title = {Next-Generation Model-based Variability Management: Languages and Tools},
  author = {Acher, Mathieu and Michel, Rapha{\"e}l and Heymans, Patrick},
  year = {2012},
  pdf = {http://dl.acm.org/citation.cfm?id=2364469},
  booktitle = {ACM 16th International Software Product Line Conference (SPLC'12)},
  series = {{}},
  address = {Salvador (Brazil)},
  month = sep,
  days = {2-7 September},
  pages = {},
  hal_id = {hal-00718339},
  organization = {},
  publisher = {},
  lang = {english},
  isbn = {},
  url = {},
  sorte = {colin},
  month_numeric = {9}
}
@inproceedings{acher-heymans-etal:2012a,
  title = {Next-Generation Model-based Variability Management: Languages and Tools (tutorial)},
  author = {Acher, Mathieu and Heymans, Patrick and Collet, Philippe and Lahire, Philippe},
  year = {2012},
  booktitle = {ACM/IEEE 15th International Conference  on Model Driven Engineering Languages \& Systems (MODELS'2012)},
  series = {{}},
  tutorial = {1},
  address = {Innsbruck (Austria)},
  month = oct,
  days = {sep 30 - oct 5},
  organization = {},
  publisher = {},
  pages = {},
  lang = {english},
  isbn = {},
  url = {http://hal.inria.fr/docs/00/76/71/70/PDF/NextGenVariabilityMODELS12-tutorial.pdf},
  hal_id = {hal-00767170},
  sorte = {colin},
  abstract = {Variability modelling and management is a key activity in a growing number of software engineering contexts, from software product lines to dynamic adaptive systems. Feature models are the de facto standard to formally represent and reason about commonality and variability of a software system. This tutorial aims at presenting the next generation of feature modelling languages and tools, directly applicable to a wide range of model-based variability problems and application domains. Participants (being practitioners or academics, beginners or advanced) will learn the principles and foundations of languages and tool-supported techniques dedicated to the model-based management of variability. },
  month_numeric = {10}
}
@inproceedings{acher-michel-ciel:2012b,
  title = {Next-Generation Model-based Variability Management: Languages and Tools (Tutorial)},
  author = {Acher, Mathieu and Michel, Rapha{\"e}l and Heymans, Patrick},
  year = {2012},
  booktitle = {Conf{\'e}rence en Ing{\'e}nieriE du Logiciel (CIEL'12)},
  series = {{}},
  address = {},
  organization = {},
  audience = {national},
  publisher = {},
  pages = {},
  lang = {english},
  isbn = {},
  url = {},
  sorte = {colna}
}
@inproceedings{moisan-rigault-etal:2012,
  title = {A Feature-based Approach to System Deployment and Adaptation},
  author = {Moisan, Sabine and Rigault, Jean-Paul and Acher, Mathieu},
  year = {2012},
  hal_id = {hal-00708745},
  booktitle = {Proceedings of the 2012 international workshop on Modeling in software engineering at ICSE 2012 (MiSE'12)},
  series = {{}},
  ar = {41\%},
  address = {Zurich},
  month = jun,
  days = {2-9 June},
  organization = {},
  publisher = {IEEE},
  workshop = {1},
  pages = {},
  lang = {english},
  isbn = {},
  url = {},
  sorte = {colin},
  month_numeric = {6}
}
@inproceedings{boucher-abbasi-etal:2012,
  title = {Towards More Reliable Configurators: A Re-engineering Perspective},
  author = {Boucher, Quentin and Abbasi, Ebrahim and Hubaux, Arnaud and Perrouin, Gilles and Acher, Mathieu and Heymans, Patrick},
  year = {2012},
  booktitle = {Third International Workshop on Product LinE Approaches in Software Engineering at ICSE 2012 (PLEASE'12)},
  ar = {57\%},
  series = {{}},
  address = {Zurich},
  month = jun,
  days = {4 June},
  organization = {},
  publisher = {},
  hal_id = {hal-00718278},
  pages = {},
  lang = {english},
  workshop = {1},
  isbn = {},
  url = {},
  sorte = {colin},
  abstract = { Delivering configurable solutions, that is, products tailored to the requirements of a particular customer, is a priority of most B2B and B2C markets. These markets now heavily rely on interactive configurators that help customers build complete and correct products. Reliability is thus a critical requirement for configurators. Yet, our experience in industry reveals that many configurators are developed in an ad hoc manner, raising correctness and maintenance issues. In this paper, we present a vision for re-engineering more reliable configurators and the challenges it poses. The first challenge is to reverse engineer from an existing configurator the variability information, including complex rules, and to consolidate it in a variability model, namely a feature model. The second challenge is to forward engineer a new configurator that uses the feature model to generate a customized graphical user interface and the underlying reasoning engine. },
  month_numeric = {6}
}
@inproceedings{acher-michel-please:2012,
  title = {Languages and Tools for Managing Feature Models},
  author = {Acher, Mathieu and Michel, Rapha{\"e}l and Heymans, Patrick and Collet, Philippe and Lahire, Philippe},
  year = {2012},
  hal_id = {hal-00718325},
  booktitle = {Third International Workshop on Product LinE Approaches in Software Engineering at ICSE 2012 (PLEASE'12)},
  series = {{PLEASE}},
  ar = {57\%},
  address = {Zurich, Switzerland},
  month = jun,
  days = {4 June},
  workshop = {1},
  organization = {ACM},
  publisher = {},
  pages = {4},
  lang = {english},
  isbn = {},
  url = {},
  sorte = {colin},
  month_numeric = {6}
}
@inproceedings{lenoir2012,
  title = {Modelling variability using CVL: a step-by-step tutorial},
  author = {Le Noir, J{\'e}r{\^o}me and Barais, Olivier and Ferreira Filho, Joao Bosco and Acher, Mathieu and J{\'e}z{\'e}quel, Jean-Marc},
  language = {french},
  booktitle = {Journ{\'e}e Lignes de Produits},
  audience = {national},
  year = {2012},
  month = nov,
  month_numeric = {11}
}
@inproceedings{acher-cleve-etal:2012,
  title = {On Extracting Feature Models From Product Descriptions},
  author = {Acher, Mathieu and Cleve, Anthony and Perrouin, Gilles and Heymans, Patrick and Collet, Philippe and Lahire, Philippe and Vanbeneden, Charles},
  year = {2012},
  booktitle = {Sixth International Workshop on Variability Modelling of Software-intensive Systems (VaMoS'12)},
  series = {{VaMoS}},
  address = {Leipzig, Germany},
  month = jan,
  days = {25-27 january},
  hal_id = {hal-00718467},
  organization = {},
  publisher = {ACM},
  pages = {10},
  lang = {english},
  workshop = {1},
  isbn = {},
  url = {https://nyx.unice.fr/publis/acher-cleve-etal:2012.pdf},
  ar = {51\%},
  sorte = {colin},
  abstract = { In product line engineering, domain analysis is the process of analyzing related products to identify their common and variable features. This process is generally carried out by experts on the basis of existing product descriptions, which are expressed in a more or less structured way. Modeling and reasoning about product descriptions are error-prone and time-consuming tasks. Feature models (FMs) constitute popular means to specify product commonalities and variabilities in a compact way, and to provide automated support to the domain analysis process. This paper aims at easing the transition from product descriptions expressed in a tabular format to FMs accurately representing them. This process is parameterized through a dedicated language and high-level directives (e.g., products/features scoping). We guarantee that the resulting FM represents the set of legal feature combinations supported by the considered products and has a readable tree hierarchy together with variability information. We report on our experiments based on public data and characterize the properties of the derived FMs. },
  month_numeric = {1}
}
@inproceedings{acher-cleve-etal:2011,
  title = {Reverse Engineering Architectural Feature Models},
  author = {Acher, Mathieu and Cleve, Anthony and Collet, Philippe and Merle, Philippe and Duchien, Laurence and Lahire, Philippe},
  year = {2011},
  booktitle = {5th European Conference on Software Architecture (ECSA'11), long paper},
  series = {{LNCS}},
  address = {Essen (Germany)},
  month = sep,
  days = {13-16 september},
  organization = {},
  important = {1},
  publisher = {Springer},
  pages = {16},
  lang = {english},
  isbn = {},
  url = {},
  slides = {http://fr.slideshare.net/acher/reverse-engineering-architectural-feature-models},
  hal_id = {inria-00614984},
  ar = {25\%},
  sorte = {colin},
  abstract = { Reverse engineering the variability of an existing system is a challenging activity. The architect's knowledge is essential to identify variation points and explicit constraints between features, for instance in feature models (FMs), but the manual creation of FMs is both time-consuming and error-prone. On a large scale, it is very difficult for an architect to guarantee that the resulting FM ensures a safe composition of the architectural elements when some features are selected. In this paper, we present a comprehensive, tool-supported process for reverse engineering architectural FMs. We develop automated techniques to extract and combine different variability descriptions of an architecture. Then, alignment and reasoning techniques are applied to integrate the architect's knowledge and reinforce the extracted FM. We illustrate the reverse engineering process when applied to a representative software system, FraSCAti, and we report on our experience in this context. },
  month_numeric = {9}
}
@phdthesis{acher:2011,
  title = {Managing Multiple Feature Models: Foundations, Language, and Applications},
  author = {Acher, Mathieu},
  month = sep,
  year = {2011},
  address = {Nice, France},
  school = {University of Nice Sophia Antipolis},
  pages = {249},
  type = {},
  lang = {english},
  isbn = {},
  url = {},
  slides = {http://fr.slideshare.net/acher/acher-phd-thesis-defense},
  sorte = {these},
  month_numeric = {9}
}
@inproceedings{acher-collet-etal:2011h,
  title = {Decomposing Feature Models: Language, Environment, and Applications},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and France, Robert},
  year = {2011},
  booktitle = {Automated Software Engineering (ASE'11), short paper: demonstration track},
  series = {{}},
  address = {Lawrence, USA},
  month = nov,
  days = {6-10 november},
  organization = {},
  publisher = {IEEE/ACM},
  pages = {},
  lang = {english},
  isbn = {},
  slides = {http://fr.slideshare.net/acher/ase-tool-demonstration},
  doi = {10.1109/ASE.2011.6100135},
  url = {https://dl.acm.org/citation.cfm?id=2190078.2190137},
  sorte = {colin},
  abstract = {Variability in software product lines is often expressed through feature models (FMs). To handle the complexity of increasingly larger FMs, we propose semantically meaningful decomposition support through a slicing operator. We describe how the slicing operator is integrated into the FAMILIAR environment and how it can be combined with other operators to support complex tasks over FMs in different case studies.},
  month_numeric = {11}
}
@inproceedings{acher-collet-etal:2011e,
  title = {Slicing Feature Models},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and France, Robert},
  year = {2011},
  booktitle = {26th IEEE/ACM International Conference On Automated Software Engineering (ASE'11), short paper},
  series = {{}},
  address = {Lawrence, USA},
  month = nov,
  days = {6-10 november},
  organization = {},
  publisher = {IEEE/ACM},
  pages = {},
  lang = {english},
  isbn = {},
  url = {https://dl.acm.org/citation.cfm?id=2190091},
  slides = {http://fr.slideshare.net/acher/ase11-short-paper},
  doi = {10.1109/ASE.2011.6100089},
  sorte = {colin},
  abstract = {Feature models (FMs) are a popular formalism for describing the commonality and variability of software product lines (SPLs) in terms of features. As SPL development increasingly involves numerous large FMs, scalable modular techniques are required to manage their complexity. In this paper, we present a novel slicing technique that produces a projection of an FM, including constraints. The slicing allows SPL practitioners to find semantically meaningful decompositions of FMs and has been integrated into the FAMILIAR language.},
  important = {1},
  month_numeric = {11}
}
@inproceedings{moisan-rigault-etal:2011,
  title = {Run Time Adaptation of Video-Surveillance Systems: A Software Modeling Approach},
  author = {Moisan, Sabine and Rigault, Jean-Paul and Acher, Mathieu and Collet, Philippe and Lahire, Philippe},
  year = {2011},
  booktitle = {8th International Conference on Computer Vision Systems (ICVS'2011)},
  series = {{LNCS}},
  hal_id = {inria-00617279},
  address = {Sophia Antipolis (France)},
  month = sep,
  days = {20-22 september},
  organization = {},
  publisher = {Springer Verlag},
  pages = {},
  lang = {english},
  isbn = {},
  url = {},
  sorte = {colin},
  month_numeric = {9}
}
@inproceedings{acher-collet-etal:2011b,
  title = {Modeling Variability from Requirements to Runtime},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and Moisan, Sabine and Rigault, Jean-Paul},
  year = {2011},
  booktitle = {16th International Conference on Engineering of Complex Computer Systems (ICECCS'11)},
  series = {{}},
  address = {Las Vegas},
  month = apr,
  hal_id = {inria-00617273},
  days = {27-29 april},
  organization = {},
  publisher = {IEEE},
  pages = {},
  lang = {english},
  isbn = {},
  url = {},
  ar = {31\%},
  sorte = {colin},
  abstract = {{ In software product line (SPL) engineering, a software configuration can be obtained through a valid selection of features represented in a feature model (FM). With a strong separation between requirements and reusable components and a deep impact of high level choices on technical parts, determining and configuring a well-adapted software configuration is a long, cumbersome and error-prone activity. This paper presents a modeling process in which variability sources are separated in different FMs and inter-related by propositional constraints while consistency checking and propagation of variability choices are automated. We show how the variability requirements can be expressed and then refined at design time so that the set of valid software configurations to be considered at runtime may be highly reduced. Software tools support the approach and some experimentations on a video surveillance SPL are also reported. }},
  month_numeric = {4}
}
@inproceedings{acher-collet-etal:2011a,
  title = {Managing Feature Models with FAMILIAR: a Demonstration of the Language and its Tool Support},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and France, Robert},
  year = {2011},
  booktitle = {Fifth International Workshop on Variability Modelling of Software-intensive Systems (VaMoS'11)},
  series = {{VaMoS}},
  address = {Namur, Belgium},
  month = jan,
  days = {27th-29th january},
  organization = {},
  publisher = {ACM},
  pages = {},
  ar = {55\%},
  lang = {english},
  isbn = {},
  url = {},
  workshop = {1},
  sorte = {colin},
  abstract = {{ Developing software product lines involves modeling a large number of features, usually using feature models, that represent different viewpoints, sub-systems or concerns of the software system. To manage complexity on a large scale, there is a need to separate, relate and compose several feature models while automating the reasoning on their compositions. This demonstration gives an overview of a Domain-Specific Language, FAMILIAR, that is dedicated to the management of feature models. Its comprehensive programming environment, based on Eclipse, is also described. It complements existing tool support (i.e., FeatureIDE). }},
  month_numeric = {1}
}
@inproceedings{acher-collet-etal:2011,
  title = {A Domain-Specific Language for Managing Feature Models},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and France, Robert},
  year = {2011},
  booktitle = {Symposium on Applied Computing (SAC'11)},
  series = {{}},
  address = {Taiwan},
  month = mar,
  days = {21-25 march},
  organization = {Programming Languages Track},
  publisher = {ACM},
  pages = {},
  lang = {english},
  isbn = {},
  url = {https://nyx.unice.fr/publis/acher-collet-etal:2011.pdf},
  ar = {33\%},
  sorte = {colin},
  abstract = { Feature models are a popular formalism for managing variability in software product lines (SPLs). In practice, developing an SPL can involve modeling a large number of features representing different viewpoints, sub-systems or concerns of the software system. To manage complexity, there is a need to separate, relate and compose several feature models while automating the reasoning on their compositions in order to enable rigorous SPL validation and configuration. In this paper, we propose a Domain-Specific Language (DSL) that is dedicated to the management of feature models and that complements existing tool support. Rationale for this language is discussed and its main constructs are presented through examples. We show how the DSL can be used to realize a non trivial scenario in which multiple SPLs are managed. },
  month_numeric = {3}
}
@inproceedings{acher-collet-etal:2010d,
  title = {FAMILIAR, a Language and its Environment for Feature Model Management},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and France, Robert},
  year = {2010},
  booktitle = {Journ{\'e}e Lignes de Produits. Ma{\^\i}triser la Diversit{\'e}},
  series = {{}},
  address = {},
  month = oct,
  days = {20 october},
  organization = {Universit{\'e} Paris 1 - Panth{\'e}on Sorbonne},
  publisher = {},
  pages = {12},
  other = {1},
  lang = {english},
  isbn = {},
  url = {},
  audience = {national},
  sorte = {colna},
  abstract = {{ Feature models are a popular formalism for managing variability in software product lines (SPLs). In practice, developing an SPL can involve modeling a large number of features representing different viewpoints, sub-systems or concerns of the software system. To manage complexity, there is a need to separate, relate and compose several feature models while automating the reasoning on their compositions in order to enable rigorous SPL validation and configuration. In this paper, we propose a domain-specific language, named FAMILIAR, that is dedicated to the management of feature models. We introduce the main constructs of the language through examples and we describe its comprehensive programming environment, based on Eclipse, that complements existing tool support (i.e., FeatureIDE). }},
  month_numeric = {10}
}
@inproceedings{fagerengjohansen-fleurey-etal:2010,
  title = {Exploring the Synergies Between Feature Models and Ontologies},
  author = {Fagereng Johansen, Martin and Fleurey, Franck and Acher, Mathieu and Collet, Philippe and Lahire, Philippe},
  year = {2010},
  booktitle = {International Workshop on Model-driven Approaches in Software Product Line Engineering (MAPLE 2010)},
  series = {{SPLC'10 (Volume 2)}},
  address = {Jeju Island, South Korea},
  month = sep,
  days = {13-17 september},
  organization = {},
  url = {http://heim.ifi.uio.no/martifag/papers/Johansen2010.pdf},
  publisher = {Lancaster University},
  volume = {2},
  pages = {163-171},
  workshop = {1},
  lang = {english},
  isbn = {978-1-86220-274-0},
  sorte = {colin},
  abstract = { A factor slowing down the use of feature models is that either the concepts or the relations expressed in a feature model are not defined at all, or defined in an unsatisfactory manner; feature models are sometimes too vague to be analyzed by a reasoning tool. It is thus difficult to determine if the features in a feature model are arranged and structured consistently with domain knowledge and if they are accurately expressed, organized and represented. Ontology modeling can improve feature modeling by providing additional information relevant for the domain in which a feature model is constructed. Finding synergies between feature models and ontologies will aid an SPL engineer in accurately expressing, organizing and representing features in their feature models. In this paper, we look at potential benefits in using the two modeling formalisms together, we identify issues and challenges considering the gap between the two formalisms and discuss the importance of this gap. We report on our current ideas and results. },
  month_numeric = {9}
}
@misc{acher-collet-etal:2009a,
  title = {Modeling Context and Dynamic Adaptations with Feature Models},
  author = {Acher, Mathieu and Collet, Philippe and Fleurey, Franck and Lahire, Philippe and Moisan, Sabine and Rigault, Jean-Paul},
  month = oct,
  other = {1},
  year = {2009},
  lang = {english},
  hal_id = {hal-00419990},
  url = {http://nyx.unice.fr/publis/acher-collet-etal:2009a.pdf},
  note = {(MRT'09, poster)},
  sorte = {autre},
  month_numeric = {10}
}
@misc{acher-collet-etal:2011c,
  title = {FAMILIAR (FeAture Model scrIpt Language for manIpulation and Automatic Reasoning): https://nyx.unice.fr/projects/familiar/},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and France, Robert},
  month = jan,
  year = {2011},
  lang = {english},
  isbn = {},
  note = {},
  sorte = {web},
  other = {1},
  month_numeric = {1}
}
@misc{acher-cleve-etal:2011a,
  title = {Reverse Engineering Architectural Feature Models},
  author = {Acher, Mathieu and Cleve, Anthony and Collet, Philippe and Merle, Philippe and Duchien, Laurence and Lahire, Philippe},
  booktitle = {10th Belgian-Netherlands Seminar on Software Evolution (BENEVOL)},
  month = dec,
  year = {2011},
  other = {1},
  note = {(BENEVOL'11)},
  lang = {english},
  url = {http://soft.vub.ac.be/benevol2011/abstracts/Acher.pdf},
  sorte = {autre},
  slides = {http://fr.slideshare.net/acher/benevol11-reverse-engineering-architectural-feature-models},
  month_numeric = {12}
}
@techreport{acher-collet-etal:2010b,
  title = {Managing Multiple Software Product Lines Using Merging Techniques},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and France, Robert},
  month = may,
  year = {2010},
  address = {Sophia Antipolis, France},
  institution = {University of Nice Sophia Antipolis, I3S CNRS},
  type = {Technical Report},
  lang = {english},
  isbn = {I3S/RR-2010-06-FR},
  url = {http://www.i3s.unice.fr/~mh/RR/2010/RR-10-06-P.LAHIRE.pdf},
  sorte = {rappo},
  other = {1},
  abstract = {A growing number of organizations produce and maintain multiple Software Product Lines (SPLs) or design software products that utilize features in SPLs maintained by competing suppliers. Manually building monolithic Feature Models (FMs) to help manage features described across different SPLs is error-prone and tedious and the resulting FMs can be difficult to understand and use. In this paper we propose a compositional approach to managing multiple SPLs that involves automatically merging FMs defined across the SPLs. We illustrate how the approach can be used to create FMs that support selection of products from among sets of competing products provided by different companies or suppliers. The merging techniques can also manage features from different SPLs which are then combined to form products. We show that the proposed approach results in more compact FMs, and we provide some empirical results on the complexity and scalability of the composition operators used in the approach. },
  month_numeric = {5}
}
@inproceedings{acher-collet-etal:2010a,
  title = {Managing Variability in Workflow with Feature Model Composition Operators},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and France, Robert},
  year = {2010},
  booktitle = {9th International Conference on Software Composition (SC'10)},
  series = {Software Composition},
  month = jun,
  ar = {28\%},
  days = {June 28-July 2},
  organization = {},
  publisher = {Springer},
  volume = {LNCS},
  pages = {16},
  lang = {english},
  isbn = {},
  url = {},
  hal_id = {hal-00484152},
  slides = {http://fr.slideshare.net/acher/managing-variability-in-workflow-with-feature-model-composition-operators-4851930},
  sorte = {colin},
  abstract = {In grid-based scientific applications, building a workflow essentially involves composing parameterized services describing families of services and then configuring the resulting workflow product line. In domains (e.g., medical imaging) in which many different kinds of highly parameterized services exist, there is a strong need to manage variabilities so that scientists can more easily configure and compose services with consistency guarantees. In this paper, we propose an approach in which variable points in services are described with several separate feature models, so that families of workflows can be defined as compositions of feature models. A compositional technique then allows reasoning about the compatibility between connected services to ensure consistency of an entire workflow, while supporting automatic propagation of variability choices when configuring services. },
  month_numeric = {6}
}
@inproceedings{acher-collet-etal:2010,
  title = {Comparing Approaches to Implement Feature Model Composition},
  hal_id = {hal-00484232},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and France, Robert},
  year = {2010},
  booktitle = {6th European Conference on Modelling Foundations and Applications (ECMFA)},
  ar = {31\%},
  month = jun,
  days = {15-18 june},
  organization = {},
  publisher = {Springer},
  volume = {LNCS},
  pages = {16},
  lang = {english},
  isbn = {},
  url = {},
  slides = {http://fr.slideshare.net/acher/comparing-approaches-to-implement-feature-model-composition},
  sorte = {colin},
  abstract = {The use of Feature Models (FMs) to define the valid combinations of features in Software Product Lines (SPL) is becoming commonplace. To enhance the scalability of FMs, support for composing FMs describing different SPL aspects is needed. Some composition operators, with interesting property preservation capabilities, have already been defined but a comprehensive and efficient implementation is still to be proposed. In this paper, we systematically compare strengths and weaknesses of different implementation approaches. The study provides some evidence that using generic model composition frameworks does not help much in the realization, whereas a specific solution is ultimately necessary and clearly stands out by its qualities.},
  month_numeric = {6}
}
@inproceedings{acher-collet-etal:2009,
  title = {Composing Feature Models},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and France, Robert},
  year = {2009},
  hal_id = {hal-00415767},
  booktitle = {2nd International Conference on Software Language Engineering (SLE'09)},
  series = {LNCS},
  month = oct,
  days = {5-6 oct},
  organization = {},
  ar = {19\%},
  publisher = {Springer},
  pages = {20},
  lang = {english},
  isbn = {},
  slides = {http://fr.slideshare.net/acher/composing-feature-models},
  sorte = {colin},
  important = {1},
  note = {Most Influential Paper Award at SLE 2019},
  abstract = {Feature modeling is a widely used technique in Software Product Line development. Feature models allow stakeholders to describe domain concepts in terms of commonalities and differences within a family of software systems. Developing a complex monolithic feature model can require significant effort and restrict the reusability of a set of features already modeled. We advocate using modeling techniques that support separating and composing concerns to better manage the complexity of developing large feature models. In this paper, we propose a set of composition operators dedicated to feature models. These composition operators enable the development of large feature models by composing smaller feature models which address well-defined concerns. The operators are notably distinguished by their documented capabilities to preserve some significant properties.},
  month_numeric = {10}
}
@inproceedings{acher-collet-etal:2009b,
  title = {Modeling Context and Dynamic Adaptations with Feature Models},
  author = {Acher, Mathieu and Collet, Philippe and Fleurey, Franck and Lahire, Philippe and Moisan, Sabine and Rigault, Jean-Paul},
  year = {2009},
  booktitle = {4th International Workshop Models@run.time at Models 2009 (MRT'09)},
  series = {},
  month = oct,
  days = {5 oct},
  workshop = {1},
  organization = {},
  publisher = {},
  pages = {10},
  lang = {english},
  isbn = {},
  hal_id = {hal-00419990},
  sorte = {colin},
  abstract = {Self-adaptive and dynamic systems adapt their behavior according to the context of execution. The contextual information exhibits multiple variability factors which induce many possible configurations of the software system at runtime. The challenge is to specify the adaptation rules that can link the dynamic variability of the context with the possible variants of the system. Our work investigates the systematic use of feature models for modeling the context and the software variants, together with their inter-relations, as a way to configure the adaptive system with respect to a particular context. A case study in the domain of video surveillance systems is used to illustrate the approach.},
  month_numeric = {10}
}
@inproceedings{acher-lahire-etal:2009,
  title = {Tackling High Variability in Video Surveillance Systems through a Model Transformation Approach},
  author = {Acher, Mathieu and Lahire, Philippe and Moisan, Sabine and Rigault, Jean-Paul},
  year = {2009},
  booktitle = {MiSE'09: Proceedings of the 2009 ICSE Workshop on Modeling in Software Engineering},
  series = {},
  month = may,
  workshop = {1},
  days = {17-18 May},
  organization = {IEEE Computer Society},
  publisher = {},
  pages = {},
  lang = {english},
  isbn = {},
  hal_id = {hal-00415770},
  sorte = {colin},
  abstract = {This work explores how model-driven engineering techniques can support the configuration of systems in domains presenting multiple variability factors. Video surveillance is a good candidate, for which we have extensive experience. Ultimately, we wish to automatically generate a software component assembly from an application specification, using model-to-model transformations. The challenge is to cope with variability both at the specification and at the implementation levels. Our approach advocates a clear separation of concerns. More precisely, we propose two feature models, one for task specification and the other for software components. The first model can be transformed into one or several valid component configurations through step-wise specialization. This paper outlines our approach, focusing on the two feature models and their relations. We particularly focus on variability and constraint modeling in order to achieve the mapping from domain variability to software variability through model transformations.},
  month_numeric = {5}
}
@inproceedings{acher-collet-etal:2008a,
  title = {Imaging Services on the Grid as a Product Line: Requirements and Architecture},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe and Montagnat, Johan},
  year = {2008},
  workshop = {1},
  booktitle = {Service-Oriented Architectures and Software Product Lines - Putting Both Together (SOAPL'08)},
  series = {},
  month = sep,
  days = {8},
  organization = {Associated workshop of SPLC 2008},
  publisher = {IEEE Computer Society},
  pages = {},
  hal_id = {hal-00419992},
  lang = {english},
  isbn = {},
  sorte = {colin},
  abstract = {SOA is now the reference architecture for medical imaging processing on the grid. Imaging services must be composed in workflows to implement the processing chains, but the need to handle end-to-end qualities of service hampers both the provision of services and their composition. This paper analyses the variability of functional and non-functional aspects of this domain and proposes a first architecture in which services are organized within a product line architecture and metamodels help in structuring necessary information.},
  month_numeric = {9}
}
@inproceedings{acher-collet-etal:2008,
  title = {Issues in Managing Variability of Medical Imaging Grid Services},
  author = {Acher, Mathieu and Collet, Philippe and Lahire, Philippe},
  year = {2008},
  workshop = {1},
  hal_id = {hal-00459517},
  booktitle = {MICCAI-Grid Workshop (MICCAI-Grid)},
  pages = {},
  lang = {english},
  isbn = {},
  sorte = {colin},
  abstract = {In medical image analysis, there exist multifold applications to grids, and service-oriented architectures are more and more used to implement such imaging applications. In this context, workflow and service architects have to face an important variability problem related both to the functional description of services, and to the numerous quality of service (QoS) dimensions that are to be considered. In this paper, we analyze such variability issues and establish the requirements of a service product line, whose objective is to facilitate variability handling in the image processing chain.}
}
@mastersthesis{acher:2008,
  title = {Vers une ligne de services pour la grille: application \`a l'imagerie m\'edicale},
  author = {Acher, Mathieu},
  month = jun,
  year = {2008},
  address = {Sophia Antipolis, France},
  school = {Universit{\'e} de Nice Sophia-Antipolis},
  pages = {23},
  type = {},
  lang = {french},
  isbn = {},
  url = {http://nyx.unice.fr/publis/acher:2008.pdf},
  sorte = {these},
  month_numeric = {6}
}
@misc{acher-aranega:2008,
  title = {Un compte rendu de la conf\'erence Models 2008 (Toulouse, France)},
  author = {Acher, Mathieu and Aranega, Vincent},
  month = dec,
  year = {2008},
  pages = {15},
  lang = {french},
  url = {http://nyx.unice.fr/publis/acher-aranega:2008.pdf},
  sorte = {autre},
  other = {1},
  month_numeric = {12}
}