From 4cc5a8bd7dbebad5280e577691a1464118e67872 Mon Sep 17 00:00:00 2001 From: Corrie Sccheepers Date: Sun, 4 Feb 2024 21:02:11 +0200 Subject: [PATCH] WIP --- src/lib/library.bib | 2027 ++++++++++++++++++++++--------------------- 1 file changed, 1021 insertions(+), 1006 deletions(-) diff --git a/src/lib/library.bib b/src/lib/library.bib index fa1bd59..799be69 100644 --- a/src/lib/library.bib +++ b/src/lib/library.bib @@ -3,199 +3,52 @@ BibTeX export options can be customized via Options -> BibTeX in Mendeley Desktop -@inproceedings{Bogatinovski2023, -abstract = {Logging in software development plays a crucial role in bug-fixing, maintaining the code and operating the application. Logs are hints created by human software developers that aim to help human developers and operators in identifying root causes for application bugs or other misbehaviour types. They also serve as a bridge between the Devs and the Ops, allowing the exchange of information. The rise of the DevOps paradigm with the CI/CD pipelines led to a significantly higher number of deployments per month and consequently increased the logging requirements. In response, AI-enabled methods for IT operation (AIOps) are introduced to automate the testing and run-time fault tolerance to a certain extent. However, using logs tailored for human understanding to learn (automatic) AI methods poses an ill-defined problem: AI algorithms need no hints but structured, precise and indicative data. Until now, AIOps researchers adapt the AI algorithms to the properties of the existing human-centred data (e.g., log sentiment), which are not always trivial to model. By pointing out the discrepancy, we envision that there exists an alternative approach: the logging can be adapted such that the produced logs are better tailored towards the strengths of the AI-enabled methods. In response, in this vision paper, we introduce auto-logging, which devises the idea of how to automatically insert log instructions into the code that can better suit AI-enabled methods as end-log consumers.}, -author = {Bogatinovski, Jasmin and Kao, Odej}, -booktitle = {2023 IEEE/ACM 45th International Conference on Software Engineering: New Ideas and Emerging Results (ICSE-NIER)}, -doi = {10.1109/ICSE-NIER58687.2023.00023}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Auto-Logging{\_}AI-centred{\_}Logging{\_}Instrumentation.pdf:pdf}, -isbn = {979-8-3503-0039-0}, -issn = {02705257}, -keywords = {AIOps,logging,software engineering}, -month = {may}, -pages = {95--100}, -publisher = {IEEE}, -title = {{Auto-Logging: AI-centred Logging Instrumentation}}, -year = {2023} -} -@article{Yao2009, -abstract = {In order to find out the user patterns that hide in web logs, log mining technology is one of the best ways. Log mining is the usage of data mining in the field of web server' logs. Although there are a set of softwares which can be used to analysis web logs, the algorithm raised in this article pay special attention to discover the relationship among all the pages of the web site. In this algorithm, size-link radio and static inner-link degree was creative used. 
According to the result of experiment, this algorithm can exactly find out the correlative ones among massive pages.}, -author = {Yao, Lei Yue and Xiong, Jian Ying}, -doi = {10.1109/GRC.2009.5255028}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/The{\_}research{\_}and{\_}implementation{\_}of{\_}a{\_}correlative{\_}degree{\_}mining{\_}algorithm{\_}based{\_}on{\_}IIS{\_}logs.pdf:pdf}, -isbn = {9781424448319}, -journal = {2009 IEEE International Conference on Granular Computing, GRC 2009}, -keywords = {IIS log analysis,Web log analysis,Web usage mining}, -pages = {706--709}, -title = {{The research and implementation of a correlative degree mining algorithm based on IIS logs}}, -year = {2009} -} -@article{Bounnady2016, -abstract = {Nowadays, web applications play an important role for many organizations, and there are many technologies variable in the market and each technology have its own advantage, so choosing the technologies is one important factor. This research studies of performance processing speed of two common technologies for developing web applications namely PHP and ASP.NET. These technologies run on environment as Windows operating system where ASP.NET using IIS will be compared with PHP using IIS as well as the PHP using Apache. The comparison is conducted through five approaches including webpage loading; algorithm processing; database managing; file(s) uploading and reading/writing external file(s). The results from our research demonstrate as ASP.NET is more effective than PHP in various areas such as webpage loading (1.81 times faster), external file(s) reading/writing (3.77 times faster) and Algorithm calculation (Hanoi Tower 14.74 times faster). However, PHP can operating more efficiently in some other areas such as variable datatypes exchanging (adding big numbers PHP is 6.82 times faster), database managing (PHP is 1.45 times faster) and files uploading (PHP is 1.17 times faster).}, -author = {Bounnady, Khampheth and Phanthavong, Khampaseuth and Pathoumvanh, Somsanouk and Sihalath, Keokanlaya}, -doi = {10.1109/ECTICon.2016.7561484}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Bounnady et al. - 2016 - Comparison the processing speed between PHP and ASP.NET.pdf:pdf}, -isbn = {9781467397490}, -journal = {2016 13th International Conference on Electrical Engineering/Electronics, Computer, Telecommunications and Information Technology, ECTI-CON 2016}, -keywords = {ASP.Net,performance comparison,php,processing speed,server site scripting}, -pages = {0--4}, -publisher = {IEEE}, -title = {{Comparison the processing speed between PHP and ASP.NET}}, -year = {2016} -} -@article{Niu2018, -abstract = {This article summarizes the RE in the Age of Continuous Deployment panel at the 25th IEEE International Requirements Engineering Conference. It highlights two synergistic points (user stories and linguistic tooling) and one challenge (nonfunctional requirements) in fast-paced, agile-like projects, and recommends how to carry on the dialogue.}, -author = {Niu, Nan and Brinkkemper, Sjaak and Franch, Xavier and Partanen, Jari and Savolainen, Juha}, -doi = {10.1109/MS.2018.1661332}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Niu et al. 
- 2018 - Requirements engineering and continuous deployment.pdf:pdf}, -issn = {07407459}, -journal = {IEEE Software}, -keywords = {25th IEEE International Requirements Engineering C,RE in the Age of Continuous Deployment,agile software development,continuous deployment,linguistic tooling,nonfunctional requirements,requirements engineering,software development,software engineering,software requirements,user stories}, -number = {2}, -pages = {86--90}, -publisher = {IEEE}, -title = {{Requirements engineering and continuous deployment}}, -volume = {35}, -year = {2018} -} -@article{Harkness2007, -abstract = {The latest version of Microsoft Visual Basic (2005) is built upon the Microsoft.NET Framework. It has finally become a fully-fledged Object Oriented Language with all the associated features one would come to expect. It allows Visual Basic programmers to tackle much larger applications, through improved scalability and reusability. This article discusses the new features using code examples to real applications in the Laboratory Automation environment. {\textcopyright} 2007, Society for Laboratory Automation and Screening. All rights reserved.}, -author = {Harkness, Robert and Crook, Malcolm and Povey, David}, -doi = {10.1016/j.jala.2006.10.014}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/HARKNESS, CROOK, POVEY - 2007 - Programming Review of Visual Basic.NET for the Laboratory Automation Industry(2).pdf:pdf}, -issn = {15402452}, -journal = {Journal of Laboratory Automation}, -keywords = {.NET,laboratory automation,object oriented,visual basic}, -number = {1}, -pages = {25--32}, -title = {{Programming Review of Visual Basic.NET for the Laboratory Automation Industry}}, -volume = {12}, -year = {2007} -} -@article{Song2017, -abstract = {The aligning of event logs with process models is of great significance for process mining to enable conformance checking, process enhancement, performance analysis, and trace repairing. Since process models are increasingly complex and event logs may deviate from process models by exhibiting redundant, missing, and dislocated events, it is challenging to determine the optimal alignment for each event sequence in the log, as this problem is NP-hard. Existing approaches utilize the cost-based A∗ algorithm to address this problem. However, scalability is often not considered, which is especially important when dealing with industrial-sized problems. In this paper, by taking advantage of the structural and behavioral features of process models, we present an efficient approach which leverages effective heuristics and trace replaying to significantly reduce the overall search space for seeking the optimal alignment. We employ real-world business processes and their traces to evaluate the proposed approach. Experimental results demonstrate that our approach works well in most cases, and that it outperforms the state-of-the-art approach by up to 5 orders of magnitude in runtime efficiency.}, -author = {Song, Wei and Xia, Xiaoxu and Jacobsen, Hans-Arno and Zhang, Pengcheng and Hu, Hao}, -doi = {10.1109/TSC.2016.2601094}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Song et al. 
- 2017 - Efficient Alignment Between Event Logs and Process Models.pdf:pdf},
-issn = {1939-1374},
-journal = {IEEE Transactions on Services Computing},
-keywords = {Event logs,alignment,process decomposition,process models,trace replaying,trace segmentation},
-month = {jan},
-number = {1},
-pages = {136--149},
-publisher = {IEEE},
-title = {{Efficient Alignment Between Event Logs and Process Models}},
-volume = {10},
-year = {2017}
+@article{Song2008,
+author = {Song and Luo and Chen},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Song, Luo, Chen - 2008 - Behavior pattern mining Apply process mining technology to common event logs of information systems.pdf:pdf},
+isbn = {978-1-4244-1685-1},
+pages = {1800--1805},
+title = {{Behavior pattern mining: Apply process mining technology to common event logs of information systems}},
+year = {2008}
}
-@article{Huang2020,
-abstract = {Large enterprise systems often produce a large volume of event logs, and event log parsing is an important log management task. The goal of log parsing is to construct log templates from log messages and convert raw log messages into structured log messages. A log parser can help engineers monitor their systems and detect anomalous behaviors and errors. Most existing log parsing methods focus on offline methods, which require all log data to be available before parsing. In addition, the massive volume of log messages makes the process complex and time-consuming. In this paper, we propose Paddy, an online event log parsing method. Paddy uses a dynamic dictionary structure to build an inverted index, which can search the template candidates efficiently with a high rate of recall. The use of Jaccard similarity and length feature to rank candidates can improve parsing precision. We evaluated our proposed method on 16 real log datasets from various sources including distributed systems, supercomputers, operating systems, mobile systems, and standalone software. Our experimental results demonstrate that Paddy achieves the highest accuracy on eight data sets out of sixteen datasets compared to other baseline methods. We also evaluated the robustness and runtime efficiency of the methods and the experimental results show that our method Paddy achieves superior stableness and is scalable with a large volume of log messages.},
-author = {Huang, Shaohan and Liu, Yi and Fung, Carol and He, Rong and Zhao, Yining and Yang, Hailong and Luan, Zhongzhi},
-doi = {10.1109/NOMS47738.2020.9110435},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Huang et al. - 2020 - Paddy An Event Log Parsing Approach using Dynamic Dictionary(3).pdf:pdf},
-isbn = {9781728149738},
-journal = {Proceedings of IEEE/IFIP Network Operations and Management Symposium 2020: Management in the Age of Softwarization and Artificial Intelligence, NOMS 2020},
-keywords = {Dynamic Dictionary,Log Parsing,Log analysis},
-title = {{Paddy: An Event Log Parsing Approach using Dynamic Dictionary}},
+@article{DeLeon-Sigg2020,
+abstract = {The technical debt concept has been in use since the 1990s. Several processes, techniques and tools, such as those related to software maintenance and risk control, are used to manage, prevent, measure and reduce technical debt. Technical debt management includes activities to identify, measure, prioritize, repay, and monitor it, but one of the main issues in managing it lies in the complexity of making technical debt visible to organizations. 
This paper presents the application of the Normative Process Framework to make technical debt visible in a large system developed by students of software engineering. The Normative Process Framework is used in conjunction with a process to find technical debt and document it in a simple format. Results show how technical debt was made visible for that system in a simplified way, by using documentation generated during development, and considering not only code, but also other software assets. Once technical debt is made visible, it is easier to evaluate and prioritize, and to establish a convenient set of actions to control it.},
+author = {{De Leon-Sigg}, Maria and Vazquez-Reyes, Sodel and Rodriguez-Avila, Daniel},
+doi = {10.1109/CONISOFT50191.2020.00022},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/De Leon-Sigg, Vazquez-Reyes, Rodriguez-Avila - 2020 - Towards the use of a framework to make technical debt visible.pdf:pdf;:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Towards{\_}the{\_}Use{\_}of{\_}a{\_}Framework{\_}to{\_}Make{\_}Technical{\_}Debt{\_}Visible.pdf:pdf},
+isbn = {9781728184500},
+journal = {Proceedings - 2020 8th Edition of the International Conference in Software Engineering Research and Innovation, CONISOFT 2020},
+keywords = {Framework,Technical debt,Technical debt management,Technical debt visibility},
+pages = {86--92},
+title = {{Towards the use of a framework to make technical debt visible}},
+year = {2020}
}
-@article{Sosnowski2011,
-author = {Sosnowski, Janusz and Gawkowski, Piotr and Cabaj, Krzysztof},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Sosnowski, Gawkowski, Cabaj - 2014 - Event and performance logs in system management and evaluation.pdf:pdf},
-journal = {Information Systems in Management XIV, Security and Effectiveness of ICT Systems},
-number = {January 2011},
-title = {{Event and Performance Logs in System Management and Evaluation}},
-year = {2011}
}
-@article{Reimanis2016,
-abstract = {Providing software developers and researchers with useful technical debt analysis tools is an instrumental outcome of software engineering and technical debt research. Such tools aggregate industry best practices to provide users with organized and quantifiable metrics that can help minimize the time it takes to synthesize and make an intelligent future decision regarding a system. Today, most tools rely primarily on structural measurements from static analysis to generate results. However, it is also necessary to consider measurements that capture the behavior of software, as these represent additional complexities within a system that structural measurements are incapable of detecting. Herein, we present our position, that more effort needs to be placed towards understanding software behavior so that technical debt analysis tools can begin supporting them, in order to provide tool users with a more accurate and complete view of their system. In this paper, we describe this problem in the context of design patterns and outline an effective method to talk about behaviors in the future. 
We create and classify two example behaviors using our method, both of which increase the technical debt in their respective design pattern applications.}, -author = {Reimanis, Derek and Izurieta, Clemente}, -doi = {10.1109/MTD.2016.13}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Reimanis, Izurieta - 2016 - Towards Assessing the Technical Debt of Undesired Software Behaviors in Design Patterns.pdf:pdf}, -isbn = {9781509038541}, -journal = {Proceedings - 2016 IEEE 8th International Workshop on Managing Technical Debt, MTD 2016}, -keywords = {design patterns,softare behavior,software architecture,technical debt}, -pages = {24--27}, -publisher = {IEEE}, -title = {{Towards Assessing the Technical Debt of Undesired Software Behaviors in Design Patterns}}, -year = {2016} -} -@article{Rahmani2010, -abstract = {Open source software (OSS) development is considered an effective approach to ensuring acceptable levels of software quality. One facet of quality improvement involves the detection of potential relationship between defect density and other open source software metrics. This paper presents an empirical study of the relationship between defect density and download number, software size and developer number as three popular repository metrics. This relationship is explored by examining forty-four randomly selected open source software projects retrieved from SourceForge.net. By applying simple and multiple linear regression analysis, the results reveal a statistically significant relationship between defect density and number of developers and software size jointly. However, despite theoretical expectations, no significant relationship was found between defect density and number of downloads in OSS projects. {\textcopyright} 2010 IEEE.}, -author = {Rahmani, Cobra and Khazanchi, Deepak}, -doi = {10.1109/ICIS.2010.11}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Rahmani, Khazanchi - 2010 - A study on defect density of open source software(2).pdf:pdf}, -isbn = {9780769541471}, -journal = {Proceedings - 9th IEEE/ACIS International Conference on Computer and Information Science, ICIS 2010}, -keywords = {Bug repository,Defect density,Linear regression,Open source software,Software repository metric}, -pages = {679--683}, -publisher = {IEEE}, -title = {{A study on defect density of open source software}}, -year = {2010} -} -@article{EvangelinGeetha2007, -abstract = {Software performance is an important nonfunctional attribute of software systems for producing quality software. Performance issues must be considered throughout software project development. Predicting performance early in the life cycle is addressed by many methodologies, but the data collected during feasibility study not considered for predicting performance. In this paper, we consider the data collected (technical and environmental factors) during feasibility study of software project management to predict performance. We derive an algorithm to predict the performance metrics and simulate the results using a case study on banking application. {\textcopyright}2007 IEEE.}, -author = {{Evangelin Geetha}, D. and {Suresh Kumar}, T. V. 
and {Rajani Kanth}, K.}, -doi = {10.1109/ICICS.2007.4449845}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Evangelin Geetha, Suresh Kumar, Rajani Kanth - 2007 - Predicting performance of software systems during feasibility study of software pr.pdf:pdf}, -isbn = {1424409837}, -journal = {2007 6th International Conference on Information, Communications and Signal Processing, ICICS}, -keywords = {Feasibility study,Software performance engineering,Use case point}, -pages = {1--5}, -title = {{Predicting performance of software systems during feasibility study of software project management}}, -year = {2007} -} -@article{Shahid2016, -abstract = {Change impact is an important issue in software maintenance phase. As retesting is required over a software change, there is a need to keep track of software impact associated with changes. Even a small software change can ripple through to cause a large unintended impact elsewhere in the system that makes it difficult to identify the affected functionalities. The impact after changes demands for a special traceability approach. This paper presents a new approach and prototype tool, Hybrid Coverage Analysis Tool (HYCAT), as a proof of concept to support the software manager or maintainers to manage impact analysis and its related traceability before and after a change in any software artifact. The proposed approach was then evaluated using a case study, On-Board Automobile (OBA), and experimentation. The traceability output before and after changes were produced and analyzed to capture impact analysis. The results of the evaluation show that the proposed approach has achieved some promising output and remarkable understanding as compared to existing approaches.}, -author = {Shahid, Muhammad and Ibrahim, Suhaimi}, -doi = {10.1109/IBCAST.2016.7429908}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Shahid, Ibrahim - 2016 - Change impact analysis with a software traceability approach to support software maintenance.pdf:pdf}, -isbn = {9781467391276}, -journal = {Proceedings of 2016 13th International Bhurban Conference on Applied Sciences and Technology, IBCAST 2016}, -keywords = {impact analysis,software change,software maintenance,software traceability}, -pages = {391--396}, -title = {{Change impact analysis with a software traceability approach to support software maintenance}}, -year = {2016} -} -@inproceedings{Stojanov2017, -abstract = {Software maintenance has been recognized by academicians and practitioners from industry as the most challenging and expensive part in software life cycle. The complexity and high costs of maintenance activities require systematic evidence of all maintenance activities and accurate models for planning and managing them. A common way for analyzing practice in software engineering is based on trend analysis of historical data related to activities and tasks implemented in the past. This paper presents a case study conducted in a micro software company aimed at introducing a schema for classifying maintenance tasks, and identifying trends in software maintenance tasks distribution among the programmers in the company. The discussion of results includes benefits for the company, limitations of the research and implications for academicians and practitioners in industry. 
The paper concludes with a few promising further research directions.}, -author = {Stojanov, Zeljko and Stojanov, Jelena and Dobrilovic, Dalibor and Petrov, Nikola}, -booktitle = {2017 IEEE 15th International Symposium on Intelligent Systems and Informatics (SISY)}, -doi = {10.1109/SISY.2017.8080547}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Stojanov et al. - 2017 - Trends in software maintenance tasks distribution among programmers A study in a micro software company.pdf:pdf}, -isbn = {978-1-5386-3855-2}, -month = {sep}, -pages = {000023--000028}, -publisher = {IEEE}, -title = {{Trends in software maintenance tasks distribution among programmers: A study in a micro software company}}, -year = {2017} -} -@article{VanDerAalst2004, -abstract = {Contemporary workflow management systems are driven by explicit process models, i.e., a completely specified workflow design is required in order to enact a given workflow process. Creating a workflow design is a complicated time-consuming process and, typically, there are discrepancies between the actual workflow processes and the processes as perceived by the management. Therefore, we have developed techniques for discovering workflow models. The starting point for such techniques is a so-called "workflow log" containing information about the workflow process as it is actually being executed. We present a new algorithm to extract a process model from such a log and represent it in terms of a Petri net. However, we will also demonstrate that it is not possible to discover arbitrary workflow processes. In this paper, we explore a class of workflow processes that can be discovered. We show that the $\alpha$-algorithm can successfully mine any workflow represented by a so-called SWF-net.}, -author = {{Van Der Aalst}, Wil and Weijters, Ton and Maruster, Laura}, -doi = {10.1109/TKDE.2004.47}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Workflow{\_}mining{\_}discovering{\_}process{\_}models{\_}from{\_}event{\_}logs.pdf:pdf}, -issn = {10414347}, -journal = {IEEE Transactions on Knowledge and Data Engineering}, -keywords = {Data mining,Petri nets,Workflow management,Workflow mining}, -number = {9}, -pages = {1128--1142}, -title = {{Workflow mining: Discovering process models from event logs}}, -volume = {16}, -year = {2004} +@article{Kumar2013, +abstract = {Purpose - The purpose of this paper is to provide an overview of research and development in the measurement of maintenance performance. It considers the problems of various measuring parameters and comments on the lack of structure in and references for the measurement of maintenance performance. The main focus is to determine how value can be created for organizations by measuring maintenance performance, examining such maintenance strategies as condition-based maintenance, reliability-centred maintenance, e-maintenance, etc. In other words, the objectives are to find frameworks or models that can be used to evaluate different maintenance strategies and determine the value of these frameworks for an organization. Design/methodology/approach - A state-of-the-art literature review has been carried out to answer the following two research questions. First, what approaches and techniques are used for maintenance performance measurement (MPM) and which MPM techniques are optimal for evaluating maintenance strategies? Second, in general, how can MPM create value for organizations and, more specifically, which system of measurement is best for which maintenance strategy? 
Findings - The body of knowledge on maintenance performance is both quantitatively and qualitatively based. Quantitative approaches include economic and technical ratios, value-based and balanced scorecards, system audits, composite formulations, and statistical and partial maintenance productivity indices. Qualitative approaches include human factors, amongst other aspects. Qualitatively based approaches are adopted because of the inherent limitations of effectively measuring a complex function such as maintenance through quantitative models. Maintenance decision makers often come to the best conclusion using heuristics, backed up by qualitative assessment, supported by quantitative measures. Both maintenance performance perspectives are included in this overview. Originality/value - A comprehensive review of maintenance performance metrics is offered, aiming to give, in a condensed form, an extensive introduction to MPM and a presentation of the state of the art in this field. {\textcopyright} Emerald Group Publishing Limited.},
+author = {Kumar, Uday and Galar, Diego and Parida, Aditya and Stenstr{\"{o}}m, Christer and Berges, Luis},
+doi = {10.1108/JQME-05-2013-0029},
+editor = {Kumar, Uday},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Kumar et al. - 2013 - Maintenance performance metrics A state-of-the-art review.pdf:pdf},
+issn = {1355-2511},
+journal = {Journal of Quality in Maintenance Engineering},
+keywords = {Framework,Hierarchy,Indicators,Key performance indicators,Maintenance,Maintenance performance measurement,Metrics,Performance,Performance measurement},
+month = {aug},
+number = {3},
+pages = {233--277},
+title = {{Maintenance performance metrics: a state-of-the-art review}},
+volume = {19},
+year = {2013}
}
-@article{Pathan2014,
-abstract = {Data mining is the process of finding correlations in the relational databases. There are different techniques for identifying malicious database transactions. Many existing approaches which profile is SQL query structures and database user activities to detect intrusion, the log mining approach is the automatic discovery for identifying anomalous database transactions. Mining of the Data is very helpful to end users for extracting useful business information from large database. Multi-level and multi-dimensional data mining are employed to discover data item dependency rules, data sequence rules, domain dependency rules, and domain sequence rules from the database log containing legitimate transactions. Database transactions that do not comply with the rules are identified as malicious transactions. The log mining approach can achieve desired true and false positive rates when the confidence and support are set up appropriately. The implemented system incrementally maintain the data dependency rule sets and optimize the performance of the intrusion detection process. 
{\textcopyright} 2014 IEEE.}, -author = {Pathan, Apashabi Chandkhan and Potey, Madhuri A.}, -doi = {10.1109/ICESC.2014.50}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Detection{\_}of{\_}Malicious{\_}Transaction{\_}in{\_}Database{\_}Using{\_}Log{\_}Mining{\_}Approach.pdf:pdf}, -isbn = {9781479921027}, -journal = {Proceedings - International Conference on Electronic Systems, Signal Processing, and Computing Technologies, ICESC 2014}, -keywords = {Data Mining,Database security,Intrusion Detection}, -pages = {262--265}, -publisher = {IEEE}, -title = {{Detection of malicious transaction in database using log mining approach}}, -year = {2014} +@article{Stark1996, +abstract = {Software maintenance is central to the mission of many organizations. Thus, it is natural for managers to characterize and measure those aspects of products and processes that seem to affect cost, schedule, quality, and functionality of a software maintenance delivery. This paper answers basic questions about software maintenance for a single organization and discusses some of the decisions made based on the answers. Attributes of both the software maintenance process and the resulting product were measured to direct management and engineering attention toward improvement areas, track the improvement over time, and help make choices among alternatives.}, +author = {Stark, George E.}, +doi = {10.1109/icsm.1996.565000}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Measurements{\_}for{\_}managing{\_}software{\_}maintenance.pdf:pdf;:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Stark - 1996 - Measurements for managing software maintenance.pdf:pdf}, +journal = {Conference on Software Maintenance}, +pages = {152--161}, +title = {{Measurements for managing software maintenance}}, +year = {1996} } @article{Pecchia2015, abstract = {Practitioners widely recognize the importance of event logging for a variety of tasks, such as accounting, system measurements and troubleshooting. Nevertheless, in spite of the importance of the tasks based on the logs collected under real workload conditions, event logging lacks systematic design and implementation practices. The implementation of the logging mechanism strongly relies on the human expertise. This paper proposes a measurement study of event logging practices in a critical industrial domain. We assess a software development process at Selex ES, a leading Finmeccanica company in electronic and information solutions for critical systems. Our study combines source code analysis, inspection of around 2.3 millions log entries, and direct feedback from the development team to gain process-wide insights ranging from programming practices, logging objectives and issues impacting log analysis. The findings of our study were extremely valuable to prioritize event logging reengineering tasks at Selex ES.}, @@ -212,118 +65,44 @@ @article{Pecchia2015 volume = {2}, year = {2015} } -@article{Stark1996, -abstract = {Software maintenance is central to the mission of many organizations. Thus, it is natural for managers to characterize and measure those aspects of products and processes that seem to affect cost, schedule, quality, and functionality of a software maintenance delivery. This paper answers basic questions about software maintenance for a single organization and discusses some of the decisions made based on the answers. 
Attributes of both the software maintenance process and the resulting product were measured to direct management and engineering attention toward improvement areas, track the improvement over time, and help make choices among alternatives.}, -author = {Stark, George E.}, -doi = {10.1109/icsm.1996.565000}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Measurements{\_}for{\_}managing{\_}software{\_}maintenance.pdf:pdf;:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Stark - 1996 - Measurements for managing software maintenance.pdf:pdf}, -journal = {Conference on Software Maintenance}, -pages = {152--161}, -title = {{Measurements for managing software maintenance}}, -year = {1996} -} -@inproceedings{Bekeneva2020, -abstract = {In recent years, process mining algorithms are widely used for process analysis. As input data for process mining algorithms,.xes files are used. This format has a limitation for a number of attributes; therefore, in case of registering a single event with several monitoring devices, there is problem of generating event logs based on heterogeneous data. In this paper, an algorithm for generating event logs based on data from heterogeneous monitoring devices is proposed. The most important parameters for the analysis of events are taken into account. Examples of the formation of event logs when choosing a different set of source data are given, the influence of the number and composition of the selected attributes on the result of building business process models is analyzed.}, -author = {Bekeneva, Yana A.}, -booktitle = {2020 IEEE Conference of Russian Young Researchers in Electrical and Electronic Engineering (EIConRus)}, -doi = {10.1109/EIConRus49466.2020.9039350}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Bekeneva - 2020 - Algorithm for Generating Event Logs Based on Data from Heterogeneous Sources.pdf:pdf}, -isbn = {978-1-7281-5761-0}, -keywords = {.xes files,event logs,log files,process mining}, -month = {jan}, -pages = {233--236}, -publisher = {IEEE}, -title = {{Algorithm for Generating Event Logs Based on Data from Heterogeneous Sources}}, -year = {2020} -} -@inproceedings{Sinha2021, -abstract = {We are very familiar with the phrase 'change is the only constant' and same thing applicable for software industry also. In this new world of software industry' most of the Information technology companies are following a methodology' named Agile where the development work moves quickly. Nowadays very few companies are still following Traditional Waterfall Model as software development life cycle method. In software development life cycle one of the most important phase is quality assurance phase or testing phase. In this context we will be discussing how the software testing has been implemented and how it's going to work with agile methodology. 
Also, we will do a comparative analysis between Traditional Waterfall model Testing approach and Agile testing approach.}, -author = {Sinha, Abhiup and Das, Pallabi}, -booktitle = {2021 5th International Conference on Electronics, Materials Engineering {\&} Nano-Technology (IEMENTech)}, -doi = {10.1109/IEMENTech53263.2021.9614779}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Agile{\_}Methodology{\_}Vs.{\_}Traditional{\_}Waterfall{\_}SDLC{\_}A{\_}case{\_}study{\_}on{\_}Quality{\_}Assurance{\_}process{\_}in{\_}Software{\_}Industry.pdf:pdf}, -isbn = {978-1-6654-1803-4}, -keywords = {Agile,Software Development,Testing in Agile,Waterfall model}, -month = {sep}, -pages = {1--4}, -publisher = {IEEE}, -title = {{Agile Methodology Vs. Traditional Waterfall SDLC: A case study on Quality Assurance process in Software Industry}}, -url = {https://ieeexplore.ieee.org/document/9614779/}, -year = {2021} -} -@article{Syer2013, -abstract = {Load tests ensure that software systems are able to perform under the expected workloads. The current state of load test analysis requires significant manual review of performance counters and execution logs, and a high degree of system-specific expertise. In particular, memory-related issues (e.g., memory leaks or spikes), which may degrade performance and cause crashes, are difficult to diagnose. Performance analysts must correlate hundreds of megabytes or gigabytes of performance counters (to understand resource usage) with execution logs (to understand system behaviour). However, little work has been done to combine these two types of information to assist performance analysts in their diagnosis. We propose an automated approach that combines performance counters and execution logs to diagnose memory-related issues in load tests. We perform three case studies on two systems: one open-source system and one large-scale enterprise system. Our approach flags {\textless}0.1{\%} of the execution logs with a precision {\textgreater}80{\%}. {\textcopyright} 2013 IEEE.}, -author = {Syer, Mark D. and Jiang, Zhen Ming and Nagappan, Meiyappan and Hassan, Ahmed E. and Nasser, Mohamed and Flora, Parminder}, -doi = {10.1109/ICSM.2013.22}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Syer et al. - 2013 - Leveraging performance counters and execution logs to diagnose memory-related performance issues.pdf:pdf}, -journal = {IEEE International Conference on Software Maintenance, ICSM}, -keywords = {Execution Logs,Load Testing,Performance Counters,Performance Engineering}, -pages = {110--119}, -publisher = {IEEE}, -title = {{Leveraging performance counters and execution logs to diagnose memory-related performance issues}}, -year = {2013} -} -@article{Hasan2012, -abstract = {Software maintenance constitutes a critical function that enables organizations to continually leverage their information technology (IT) capabilities. Despite the growing importance of small organizations, a majority of the software maintenance guidelines are inherently geared toward large organizations. Literature review and case-based empirical studies show that in small organizations software maintenance processes are carried out without following a systemic process. Rather, they rely on ad-hoc and heuristics methods by organizations and individuals. This paper investigates software maintenance practices in a small information systems organization to come up with the nature and categories of heuristics used that successfully guided the software maintenance process. 
Specifically, this paper documents a set of best practices that small organizations can adopt to facilitate their software maintenance processes in the absence of maintenance-specific guidelines based on preliminary empirical investigation.}, -author = {Hasan, Raza and Chakraborty, Suranjan and Dehlinger, Josh}, -doi = {10.1007/978-3-642-23202-2_9}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Hasan, Chakraborty, Dehlinger - 2012 - Examining software maintenance processes in small organizations Findings from a case study.pdf:pdf}, -isbn = {9783642232015}, -issn = {1860949X}, -journal = {Studies in Computational Intelligence}, -keywords = {Small organizations,ad-hoc process,case study,cognitive heuristics,software maintenance}, -pages = {129--143}, -title = {{Examining software maintenance processes in small organizations: Findings from a case study}}, -volume = {377}, -year = {2012} +@article{VanDerAalst2011, +abstract = {Process mining techniques enable process-centric analytics through automated process discovery, conformance checking, and model enhancement. {\textcopyright} 1970-2012 IEEE.}, +author = {{Van Der Aalst}, W.}, +doi = {10.1109/MC.2011.384}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Aalst - 2011 - Using Process Mining to Bridge the Gap between BI and BPM.pdf:pdf}, +issn = {00189162}, +journal = {Computer}, +keywords = {Business intelligence,Business process management,Discovery analytics,Process mining}, +number = {12}, +pages = {77--80}, +title = {{Using process mining to bridge the gap between BI and BPM}}, +volume = {44}, +year = {2011} } -@article{Wang2008, -abstract = {Ajax is a new concept of web application development proposed in 2005. It is the acronym of Asynchronous JavaScript and XML. Once Ajax appeared, it is rapidly applied to the fields of web development. Ajax application is different from the traditional web development model, using asynchronous interaction. The client unnecessarily waits while the server processes the data submitted. So the use of Ajax can create web user interface which is direct, highly available, richer, more dynamic and closer to a local desktop application. This article introduces the main technology and superiority of Ajax firstly, and then practices web development using ASP.NET 2.0 + Ajax. In this paper, Ajax is applied to the website pass, which enables user to have better registration experience and enhances the user's enthusiasm. The registration functions are enhanced greatly as well. The experiments show that the Ajax web application development model is superior to the traditional web application development model significantly. {\textcopyright}2008 IEEE.}, -author = {Wang, H. and Yang, J.}, -doi = {10.1109/ICIEA.2008.4582637}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Wang, Yang - 2008 - Research and application of web development based on ASP.NET 2.0Ajax.pdf:pdf}, -isbn = {9781424417186}, -journal = {2008 3rd IEEE Conference on Industrial Electronics and Applications, ICIEA 2008}, -pages = {857--860}, -title = {{Research and application of web development based on ASP.NET 2.0+Ajax}}, +@article{Krol2008, +author = {Krol, D and Scigajlo, M and Trawi{\'{n}}ski, Bogda}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Wang et al. 
- 2017 - Constructing and testing software maintainability assessment models.pdf:pdf},
+isbn = {9781424420964},
+journal = {Proceedings of the 2008 International Conference on Machine Learning and Cybernetics},
+keywords = {clustering,hcm,server log,user activity,web system},
+month = {jul},
+title = {{Investigation of Internet System User Behaviour Using Cluster Analysis}},
+year = {2008}
}
-@inproceedings{Rong2020,
-abstract = {Background: Logs provide crucial information to understand the dynamic behavior of software systems in modern software development and maintenance. Usually, logs are produced by log statements which will be triggered and executed under certain conditions. However, current studies paid very limited attention to developers' Intentions and Concerns (IC) on logging practice, leading uncertainty that whether the developers' IC are properly reflected by log statements and questionable capability to capture the expected information of system behaviors in logs. Objective: This study aims to reveal the status of developers' IC on logging practice and more importantly, how the IC are properly reflected in software source code in real-world software development. Method: We collected evidence from two sources of a series of interviews and source code analysis which are conducted in a big-data company, followed by consolidation and analysis of the evidence. Results: Major gaps and inconsistencies have been identified between the developers' IC and real log statements in source code. Many code snippets contained no log statements that the interviewees claimed to have inserted. 
Conclusion: Developers' original IC towards logging practice are usually poorly realized, which inevitably impacted the motivation and purpose to conduct this practice.}, -author = {Rong, Guoping and Xu, Yangchen and Gu, Shenghui and Zhang, He and Shao, Dong}, -booktitle = {Proceedings - 2020 IEEE International Conference on Software Maintenance and Evolution, ICSME 2020}, -doi = {10.1109/ICSME46990.2020.00012}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Can{\_}You{\_}Capture{\_}Information{\_}As{\_}You{\_}Intend{\_}To{\_}A{\_}Case{\_}Study{\_}on{\_}Logging{\_}Practice{\_}in{\_}Industry.pdf:pdf}, -isbn = {9781728156194}, -keywords = {developer,inconsistencies,intentions and concerns,logging practice}, -month = {sep}, -pages = {12--22}, -publisher = {Institute of Electrical and Electronics Engineers Inc.}, -title = {{Can You Capture Information As You Intend To? A Case Study on Logging Practice in Industry}}, -year = {2020} -} -@article{Rong2018a, -abstract = {Background: Logs are the footprints that software systems produce during runtime, which can be used to understand the dynamic behavior of these software systems. To generate logs, logging practice is accepted by developers to place logging statements in the source code of software systems. Compared to the great number of studies on log analysis, the research on logging practice is relatively scarce, which raises a very critical question, i.e. as the original intention, can current logging practice support capturing the behavior of software systems effectively? Aims: To answer this question, we first need to understand how logging practices are implemented these software projects. Method: In this paper, we carried out an empirical study to explore the logging practice in open source software projects so as to establish a basic understanding on how logging practice is applied in real world software projects. The density, log level (what to log?) and context (where to log?) are measured for our study. Results: Based on the evidence we collected in 28 top open source projects, we find the logging practice is adopted highly inconsistently among different developers both across projects and even within one project in terms of the density and log levels of logging statements. However, the choice of what context the logging statements to place is consistent to a fair degree. Conclusion: Both the inconsistency in density and log level and the convergence of context have forced us to question whether it is a reliable means to understand the runtime behavior of software systems via analyzing the logs produced by the current logging practice.}, -author = {Rong, Guoping and Gu, Shenghui and Zhang, He and Shao, Dong and Liu, Wanggen}, -doi = {10.1109/ASWEC.2018.00031}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Rong et al. - 2018 - How is logging practice implemented in open source software projects A preliminary exploration.pdf:pdf}, -isbn = {9781728112411}, -journal = {Proceedings - 25th Australasian Software Engineering Conference, ASWEC 2018}, -keywords = {Empirical study,Java-based,Log,Logging practice}, -pages = {171--180}, +@article{Tang2010, +abstract = {As the information technology industry gains maturity, the number of software systems having moved into maintenance is rapidly growing. Software maintenance is a costly, yet often neglected part of the development life-cycle. 
A software product maybe has been modified several times for different reasons, but this process don't be efficient manage, so resulting in the software been discard in advance. This paper is motivated by a desire to develop a more practical model to track and manage the maintenance process, to support the maintenance task. The maintenance request form (MRF)'s submission means the start of maintenance activities. One of important tasks is tracking the state of MRF, to control activities in the current environment of large and complex applications. And the other character is import measure into the model, to offer information to help organization control and processing their activities. {\textcopyright} 2010 IEEE.}, +author = {Tang, Li and Mei, Yong Gang and Ding, Jian Jie}, +doi = {10.1109/ETCS.2010.571}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Li, YongGang, JianJie - 2010 - Metric-Based Tracking Management in Software Maintenance.pdf:pdf}, +isbn = {9780769539874}, +journal = {2nd International Workshop on Education Technology and Computer Science, ETCS 2010}, +keywords = {Metric,Software maintenancet,Tracking management}, +pages = {675--678}, publisher = {IEEE}, -title = {{How is logging practice implemented in open source software projects? A preliminary exploration}}, -year = {2018} +title = {{Metric-based tracking management in software maintenance}}, +volume = {1}, +year = {2010} } @article{Anish2015, abstract = {Failure to identify and analyze architecturally significant functional and non-functional requirements (NFRs) early on in the life cycle of a project can result in costly rework in later stages of software development. While NFRs indicate an explicit architectural impact, the impact that functional requirements may have on architecture is often implicit. The skills needed for capturing functional requirements are different than those needed for making architectural decisions. As a result, these two activities are often conducted by different teams in a project. Therefore it becomes necessary to integrate the knowledge gathered by people with different expertise to make informed architectural decisions. We present a study to bring out that functional requirements often have implicit architectural impact and do not always contain comprehensive information to aid architectural decisions. Further, we present our initial work on automating the identification of architecturally significant functional requirements from requirements documents and their classification into categories based on the different kinds of architectural impact they can have. We believe this to be a crucial precursor for recommending specific design decisions. We envisage ArcheR, a tool that (a) automates the identification of architecturally significant functional requirements from requirement specification documents, (b) classify them into categories based on the different kinds of architectural impact they can have, (c) recommend probing questions the business analyst should ask in order to produce a more complete requirements specification, and (d) recommend possible architectural solutions in response to the architectural impact.}, @@ -338,116 +117,213 @@ @article{Anish2015 title = {{Identifying Architecturally Significant Functional Requirements}}, year = {2015} } -@article{Tang2010, -abstract = {As the information technology industry gains maturity, the number of software systems having moved into maintenance is rapidly growing. 
Software maintenance is a costly, yet often neglected part of the development life-cycle. A software product maybe has been modified several times for different reasons, but this process don't be efficient manage, so resulting in the software been discard in advance. This paper is motivated by a desire to develop a more practical model to track and manage the maintenance process, to support the maintenance task. The maintenance request form (MRF)'s submission means the start of maintenance activities. One of important tasks is tracking the state of MRF, to control activities in the current environment of large and complex applications. And the other character is import measure into the model, to offer information to help organization control and processing their activities. {\textcopyright} 2010 IEEE.}, -author = {Tang, Li and Mei, Yong Gang and Ding, Jian Jie}, -doi = {10.1109/ETCS.2010.571}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Li, YongGang, JianJie - 2010 - Metric-Based Tracking Management in Software Maintenance.pdf:pdf}, -isbn = {9780769539874}, -journal = {2nd International Workshop on Education Technology and Computer Science, ETCS 2010}, -keywords = {Metric,Software maintenancet,Tracking management}, -pages = {675--678}, +@article{Huang2020, +abstract = {Large enterprise systems often produce a large volume of event logs, and event log parsing is an important log management task. The goal of log parsing is to construct log templates from log messages and convert raw log messages into structured log messages. A log parser can help engineers monitor their systems and detect anomalous behaviors and errors. Most existing log parsing methods focus on offline methods, which require all log data to be available before parsing. In addition, the massive volume of log messages makes the process complex and time-consuming. In this paper, we propose Paddy, an online event log parsing method. Paddy uses a dynamic dictionary structure to build an inverted index, which can search the template candidates efficiently with a high rate of recall. The use of Jaccard similarity and length feature to rank candidates can improve parsing precision. We evaluated our proposed method on 16 real log datasets from various sources including distributed systems, supercomputers, operating systems, mobile systems, and standalone software. Our experimental results demonstrate that Paddy achieves the highest accuracy on eight data sets out of sixteen datasets compared to other baseline methods. We also evaluated the robustness and runtime efficiency of the methods and the experimental results show that our method Paddy achieves superior stableness and is scalable with a large volume of log messages.}, +author = {Huang, Shaohan and Liu, Yi and Fung, Carol and He, Rong and Zhao, Yining and Yang, Hailong and Luan, Zhongzhi}, +doi = {10.1109/NOMS47738.2020.9110435}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Huang et al. 
- 2020 - Paddy An Event Log Parsing Approach using Dynamic Dictionary(3).pdf:pdf}, +isbn = {9781728149738}, +journal = {Proceedings of IEEE/IFIP Network Operations and Management Symposium 2020: Management in the Age of Softwarization and Artificial Intelligence, NOMS 2020}, +keywords = {Dynamic Dictionary,Log Parsing,Log analysis}, +title = {{Paddy: An Event Log Parsing Approach using Dynamic Dictionary}}, +year = {2020} +} +@article{Slaninova2014, +abstract = {This paper is focused on log files where one log file attribute is an originator of the recorded activity (originator is a person in our case). Hence, based on the similar attributes of people, we are able to construct models which explain certain aspects of a persons behaviour. Moreover, we can extract user profiles based on behaviour and find latent ties between users and between different user groups with similar behaviours. We accomplish this by our new approach using the methods from log mining, business process analysis, complex networks and graph theory. The paper describes the whole process of the approach from the log file to the user graph. The main focus is on the step called 'The finding of user behavioural patterns'.}, +author = {Slaninov{\'{a}}, Kateřina}, +doi = {10.1109/ISDA.2013.6920751}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Slaninov{\'{a}} - 2014 - User behavioural patterns and reduced user profiles extracted from log files.pdf:pdf}, +isbn = {9781479935161}, +issn = {21647151}, +journal = {International Conference on Intelligent Systems Design and Applications, ISDA}, +keywords = {analysis of users' behaviour,behavioural patterns,complex networks,user profiles}, +pages = {289--294}, publisher = {IEEE}, -title = {{Metric-based tracking management in software maintenance}}, -volume = {1}, +title = {{User behavioural patterns and reduced user profiles extracted from log files}}, +year = {2014} +} +@article{Jain2018, +abstract = {Developing a quality software product is an essential need for the software industry. Focusing on product's quality allows software users to adapt the product more easily and efficiently. Quality plays a vital role for the software users. It is a confirmation of all the requirements according to customer satisfaction. So, it's important to define a proper software development process that leads to a quality software product. Agile being one of the quickest methodologies for software development, allows the quality product to be delivered to the customer side. The objective of this paper is to discuss the impact of Agile Software Development Process (ASDP) on quality of software product by defining the mapping between agile software development process and various quality attributes. 
The paper presents the overall importance of the software development process for a quality product.},
+author = {Jain, Parita and Sharma, Arun and Ahuja, Laxmi},
+doi = {10.1109/ICRITO.2018.8748529},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/The{\_}Impact{\_}of{\_}Agile{\_}Software{\_}Development{\_}Process{\_}on{\_}the{\_}Quality{\_}of{\_}Software{\_}Product.pdf:pdf},
+isbn = {9781538646922},
+journal = {2018 7th International Conference on Reliability, Infocom Technologies and Optimization: Trends and Future Directions, ICRITO 2018},
+keywords = {Agile Software Development Process,Software Design,Software Implementation,Software Maintainability,Software Quality,Software Requirement Analysis,Software Testing},
+pages = {812--815},
+publisher = {IEEE},
+title = {{The Impact of Agile Software Development Process on the Quality of Software Product}},
+year = {2018}
+}
+@article{Rehman2018,
+abstract = {Agile methodologies gained fame for producing high-quality software systems. Maintenance effort is often more than half of the total effort invested in a software system during its lifespan. A well-discussed issue within the community of researchers and engineers is how to use agile methodologies for maintaining developed software, because the agile software development life cycle has no specifically planned mechanism for maintenance. To bridge this gap, we used theoretical and empirical techniques to formulate factors that should be followed during agile maintenance, including planning for maintenance, the presence of an on-site customer, iterative maintenance, documentation updates after each phase, and testable maintenance.},
+author = {Rehman, Fateh Ur and Maqbool, Bilal and Riaz, Muhammad Qasim and Qamar, Usman and Abbas, Muhammad},
+doi = {10.1109/NCG.2018.8593152},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Rehman et al. - 2018 - Scrum Software Maintenance Model Efficient Software Maintenance in Agile Methodology.pdf:pdf},
+isbn = {9781538641095},
+journal = {21st Saudi Computer Society National Computer Conference, NCC 2018},
+keywords = {Agile,Agile Maintenance,Maintenance Sprints,Scrum,Scrum Maintenance,Software Maintenance},
+pages = {7--11},
+publisher = {IEEE},
+title = {{Scrum Software Maintenance Model: Efficient Software Maintenance in Agile Methodology}},
+year = {2018}
+}
+@article{Indonesia2019,
+author = {{ISO/IEC/IEEE}},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Indonesia, Xplore - 2019 - INTERNATIONAL STANDARD ISO IEC IEEE Systems and software engineering — management.pdf:pdf},
+title = {{INTERNATIONAL STANDARD ISO/IEC/IEEE Systems and software engineering — management}},
+volume = {2019},
+year = {2019}
+}
+@article{Gu2010,
+abstract = {This paper introduces the WebForms, MVC and MVP architectures and how they work under the .NET platform. The characteristics of the three structures are analyzed and compared; in particular, WebForms's ViewState, its performance and its client ID pollution problems are described and solutions are given. Combining their respective advantages, suitable scenarios for the three kinds of framework are noted, aiming to supply a reference for developing Web systems. 
{\textcopyright}2010 IEEE.},
+author = {Gu, Ming Xia and Tang, Keming},
+doi = {10.1109/ESIAT.2010.5567323},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Comparative{\_}analysis{\_}of{\_}WebForms{\_}MVC{\_}and{\_}MVP{\_}architecture.pdf:pdf},
+isbn = {9781424473885},
+journal = {2010 2nd Conference on Environmental Science and Information Application Technology, ESIAT 2010},
+keywords = {ASP.NET,MVC,MVP,Software architecture,Web system},
+pages = {391--394},
+title = {{Comparative analysis of WebForms MVC and MVP architecture}},
+volume = {2},
year = {2010}
}
-@inproceedings{Jailia2016,
-author = {Jailia, Manisha and Kumar, Ashok and Agarwal, Manisha and Sinha, Isha},
-booktitle = {2016 International Conference on ICT in Business Industry {\&} Government (ICTBIG)},
-doi = {10.1109/ICTBIG.2016.7892651},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Behavior{\_}of{\_}MVC{\_}Model{\_}View{\_}Controller{\_}based{\_}Web{\_}Application{\_}developed{\_}in{\_}PHP{\_}and{\_}.NET{\_}framework.pdf:pdf},
-isbn = {978-1-5090-5515-9},
-keywords = {- model},
-pages = {1--5},
+@article{Galster2019,
+abstract = {During software maintenance, developers have different information needs (e.g., to understand what type of maintenance activity to perform, the impact of a maintenance activity and its effort). However, information to support developers may be distributed across various sources. Furthermore, information captured in formal architecture documentation may be outdated. In this paper, we put forward a late-breaking idea and outline a solution to improve the productivity of developers by providing task-specific recommendations based on concrete information needs that arise during software maintenance.},
+author = {Galster, Matthias and Treude, Christoph and Blincoe, Kelly},
+doi = {10.1109/ICSME.2019.00060},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Supporting{\_}Software{\_}Architecture{\_}Maintenance{\_}by{\_}Providing{\_}Task-Specific{\_}Recommendations.pdf:pdf},
+isbn = {9781728130941},
+journal = {Proceedings - 2019 IEEE International Conference on Software Maintenance and Evolution, ICSME 2019},
+keywords = {Software maintenance,natural language processing,software architecture,text classification},
+pages = {370--372},
publisher = {IEEE},
-title = {{Behavior of MVC (Model View Controller) based Web Application developed in PHP and .NET framework}},
-year = {2016}
+title = {{Supporting Software Architecture Maintenance by Providing Task-Specific Recommendations}},
+year = {2019}
+}
+@article{Cui2003,
+abstract = {Queries to search engines on the Web are usually short. They do not provide sufficient information for an effective selection of relevant documents. Previous research has proposed the utilization of query expansion to deal with this problem. However, expansion terms are usually determined based on term co-occurrences within documents. In this study, we propose a new method for query expansion based on user interactions recorded in user logs. The central idea is to extract correlations between query terms and document terms by analyzing user logs. These correlations are then used to select high-quality expansion terms for new queries. Compared to previous query expansion methods, ours takes advantage of the user judgments implied in user logs. 
The experimental results show that the log-based query expansion method can produce much better results than both the classical search method and the other query expansion methods.},
+author = {Cui, Hang and Wen, Ji Rong and Nie, Jian Yun and Ma, Wei Ying},
+doi = {10.1109/TKDE.2003.1209002},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Hang Cui et al. - 2003 - Query expansion by mining user logs.pdf:pdf},
+issn = {10414347},
+journal = {IEEE Transactions on Knowledge and Data Engineering},
+keywords = {Information retrieval,Probabilistic model,Query expansion,Search engine,User log},
+number = {4},
+pages = {829--839},
+title = {{Query expansion by mining user logs}},
+volume = {15},
+year = {2003}
}
+@article{Patro2015,
+abstract = {As we know, normalization is a pre-processing stage for any type of problem statement. In particular, normalization plays an important role in fields such as soft computing and cloud computing for the manipulation of data, for example scaling the range of the data down or up before it is used in a further stage. There are many normalization techniques, namely Min-Max normalization, Z-score normalization and Decimal scaling normalization. By referring to these normalization techniques, we propose a new normalization technique, namely Integer Scaling Normalization, and we demonstrate the proposed technique using various data sets.},
+archivePrefix = {arXiv},
+arxivId = {1503.06462},
+author = {Patro, S.Gopal Krishna and Sahu, Kishore Kumar},
+doi = {10.17148/iarjset.2015.2305},
+eprint = {1503.06462},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/1503.06462.pdf:pdf},
+journal = {IARJSET},
+keywords = {amzd,integer scaling,normalization,scaling,transformation},
+pages = {20--22},
+title = {{Normalization: A Preprocessing Stage}},
year = {2015}
}
+@article{Charette1997,
+abstract = {Risk management in maintenance differs in major ways from risk management in development. Risk opportunities are more frequent, risks come from more diverse sources, and projects have less freedom to act on them. The authors describe how they dealt with these differences in a large US Navy software maintenance organization.},
+author = {Charette, Robert N. and Adams, Kevin Mac G. 
and White, Mary B.},
+doi = {10.1109/52.589232},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Charette, Adams, White - 1997 - Managing risk in software maintenance.pdf:pdf},
+issn = {07407459},
+journal = {IEEE Software},
+number = {3},
+pages = {43--50},
+title = {{Managing risk in software maintenance}},
+volume = {14},
+year = {1997}
}
+@article{Jia2018,
+abstract = {When systems fail, log data is often the most important information source for fault diagnosis. However, the performance of automatic fault diagnosis is limited by the ad-hoc nature of logs. The key problem is that existing developer-written logs are designed for humans rather than machines to automatically detect system anomalies. To improve the quality of logs for fault diagnosis, we propose a novel log enhancement approach which automatically identifies logging points that reflect anomalous behavior during system faults. We evaluate our approach on three popular software systems: AcmeAir, HDFS and TensorFlow. Results show that it can significantly improve fault diagnosis accuracy by 50{\%} on average compared to the developers' manually placed logging points.},
+author = {Jia, Tong and Li, Ying and Zhang, Chengbo and Xia, Wensheng and Jiang, Jie and Liu, Yuhong},
+doi = {10.1109/ISSREW.2018.00-22},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Jia et al. 
- 2018 - Machine Deserves Better Logging A Log Enhancement Approach for Automatic Fault Diagnosis(2).pdf:pdf}, +isbn = {9781538694435}, +journal = {Proceedings - 29th IEEE International Symposium on Software Reliability Engineering Workshops, ISSREW 2018}, +keywords = {Automatic fault diagnosis,Log enhancement,logging points}, +pages = {106--111}, publisher = {IEEE}, -title = {{A case study of the effects of architecture debt on software evolution effort}}, +title = {{Machine Deserves Better Logging: A Log Enhancement Approach for Automatic Fault Diagnosis}}, year = {2018} } -@article{Gurumdimma2016, -abstract = {The use of console logs for error detection in large scale distributed systems has proven to be useful to system administrators. However, such logs are typically redundant and incomplete, making accurate detection very difficult. In an attempt to increase this accuracy, we complement these incomplete console logs with resource usage data, which captures the resource utilisation of every job in the system. We then develop a novel error detection methodology, the CRUDE approach, that makes use of both the resource usage data and console logs. We thus make the following specific technical contributions: we develop (i) a clustering algorithm to group nodes with similar behaviour, (ii) an anomaly detection algorithm to identify jobs with anomalous resource usage, (iii) an algorithm that links jobs with anomalous resource usage with erroneous nodes. We then evaluate our approach using console logs and resource usage data from the Ranger Supercomputer. Our results are positive: (i) our approach detects errors with a true positive rate of about 80{\%}, and (ii) when compared with the well-known Nodeinfo error detection algorithm, our algorithm provides an average improvement of around 85{\%} over Nodeinfo, with a best-case improvement of 250{\%}.}, -author = {Gurumdimma, Nentawe and Jhumka, Arshad and Liakata, Maria and Chuah, Edward and Browne, James}, -doi = {10.1109/SRDS.2016.017}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Gurumdimma et al. - 2016 - CRUDE Combining Resource Usage Data and Error Logs for Accurate Error Detection in Large-Scale Distributed Sy.pdf:pdf}, -isbn = {9781509035137}, -issn = {10609857}, -journal = {Proceedings of the IEEE Symposium on Reliable Distributed Systems}, -keywords = {anomaly detection,detection,event logs,faults,large-scale HPC systems,resource usage data,unsupervised}, -pages = {51--60}, +@article{Zhu2015, +abstract = {Logging is a common programming practice of practical importance to collect system runtime information for postmortem analysis. Strategic logging placement is desired to cover necessary runtime information without incurring unintended consequences (e.g., performance overhead, trivial logs). However, in current practice, there is a lack of rigorous specifications for developers to govern their logging behaviours. Logging has become an important yet tough decision which mostly depends on the domain knowledge of developers. To reduce the effort on making logging decisions, in this paper, we propose a "learning to log" framework, which aims to provide informative guidance on logging during development. As a proof of concept, we provide the design and implementation of a logging suggestion tool, LogAdvisor, which automatically learns the common logging practices on where to log from existing logging instances and further leverages them for actionable suggestions to developers. 
Specifically, we identify the important factors for determining where to log and extract them as structural features, textual features, and syntactic features. Then, by applying machine learning techniques (e.g., feature selection and classifier learning) and noise handling techniques, we achieve high accuracy of logging suggestions. We evaluate LogAdvisor on two industrial software systems from Microsoft and two open-source software systems from GitHub (in total 19.1M LOC and 100.6K logging statements). The encouraging experimental results, as well as a user study, demonstrate the feasibility and effectiveness of our logging suggestion tool. We believe our work can serve as an important first step towards the goal of "learning to log".},
+author = {Zhu, Jieming and He, Pinjia and Fu, Qiang and Zhang, Hongyu and Lyu, Michael R. and Zhang, Dongmei},
+doi = {10.1109/ICSE.2015.60},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Zhu et al. - 2015 - Learning to log Helping developers make informed logging decisions.pdf:pdf},
+isbn = {9781479919345},
+issn = {02705257},
+journal = {Proceedings - International Conference on Software Engineering},
+pages = {415--425},
+publisher = {IEEE},
+title = {{Learning to log: Helping developers make informed logging decisions}},
+volume = {1},
+year = {2015}
}
+@inproceedings{Bekeneva2020,
+abstract = {In recent years, process mining algorithms have been widely used for process analysis. As input data for process mining algorithms, .xes files are used. This format has a limitation on the number of attributes; therefore, when a single event is registered by several monitoring devices, there is a problem of generating event logs based on heterogeneous data. In this paper, an algorithm for generating event logs based on data from heterogeneous monitoring devices is proposed. The most important parameters for the analysis of events are taken into account. 
Examples of the formation of event logs when choosing a different set of source data are given, and the influence of the number and composition of the selected attributes on the result of building business process models is analyzed.},
+author = {Bekeneva, Yana A.},
+booktitle = {2020 IEEE Conference of Russian Young Researchers in Electrical and Electronic Engineering (EIConRus)},
+doi = {10.1109/EIConRus49466.2020.9039350},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Bekeneva - 2020 - Algorithm for Generating Event Logs Based on Data from Heterogeneous Sources.pdf:pdf},
+isbn = {978-1-7281-5761-0},
+keywords = {.xes files,event logs,log files,process mining},
+month = {jan},
+pages = {233--236},
+publisher = {IEEE},
+title = {{Algorithm for Generating Event Logs Based on Data from Heterogeneous Sources}},
+year = {2020}
}
+@article{Rong2018,
+abstract = {Background: Logging practice is a critical activity in software development, which aims to offer significant information to understand the runtime behavior of software systems and support better software maintenance. There have been many relevant studies dedicated to logging practice in software engineering recently, yet there is no systematic understanding of the adoption state of logging practice in industry or of research progress in academia. 
Objective: This study aims to synthesize relevant studies on the logging practice and portray a big picture of logging practice in software engineering so as to understand current adoption status and identify research opportunities. Method: We carried out a systematic review on the relevant studies on logging practice in software engineering. Results: Our study identified 41 primary studies relevant to logging practice. Typical findings are: (1) Logging practice attracts broad interests among researchers in many concrete research areas. (2) Logging practice occurred in many development types, among which the development of fault tolerance systems is the most adopted type. (3) Many challenges exist in current logging practice in software engineering, e.g., tradeoff between logging overhead and analysis cost, where and what to log, balance between enough logging and system performance, etc. Conclusion: Results show that logging practice plays a vital role in various applications for diverse purposes. However, there are many challenges and problems to be solved. Therefore, various novel techniques are necessary to guide developers conducting logging practice and improve the performance and efficiency of logging practice.}, +author = {Rong, Guoping and Zhang, Qiuping and Liu, Xinbei and Gu, Shenghiu}, +doi = {10.1109/APSEC.2017.61}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Rong et al. - 2017 - A Systematic Review of Logging Practice in Software Engineering.pdf:pdf}, +isbn = {9781538636817}, +issn = {15301362}, +journal = {Proceedings - Asia-Pacific Software Engineering Conference, APSEC}, +keywords = {Logging Practice,Software Engineering,Systematic Literature Review}, +pages = {534--539}, +title = {{A Systematic Review of Logging Practice in Software Engineering}}, +volume = {2017-Decem}, +year = {2018} +} +@article{Song2017, +abstract = {The aligning of event logs with process models is of great significance for process mining to enable conformance checking, process enhancement, performance analysis, and trace repairing. Since process models are increasingly complex and event logs may deviate from process models by exhibiting redundant, missing, and dislocated events, it is challenging to determine the optimal alignment for each event sequence in the log, as this problem is NP-hard. Existing approaches utilize the cost-based A∗ algorithm to address this problem. However, scalability is often not considered, which is especially important when dealing with industrial-sized problems. In this paper, by taking advantage of the structural and behavioral features of process models, we present an efficient approach which leverages effective heuristics and trace replaying to significantly reduce the overall search space for seeking the optimal alignment. We employ real-world business processes and their traces to evaluate the proposed approach. Experimental results demonstrate that our approach works well in most cases, and that it outperforms the state-of-the-art approach by up to 5 orders of magnitude in runtime efficiency.}, +author = {Song, Wei and Xia, Xiaoxu and Jacobsen, Hans-Arno and Zhang, Pengcheng and Hu, Hao}, +doi = {10.1109/TSC.2016.2601094}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Song et al. 
- 2017 - Efficient Alignment Between Event Logs and Process Models.pdf:pdf}, +issn = {1939-1374}, +journal = {IEEE Transactions on Services Computing}, +keywords = {Event logs,alignment,process decomposition,process models,trace replaying,trace segmentation}, +month = {jan}, +number = {1}, +pages = {136--149}, publisher = {IEEE}, -title = {{LogAssist: Assisting Log Analysis Through Log Summarization}}, -volume = {48}, -year = {2021} +title = {{Efficient Alignment Between Event Logs and Process Models}}, +volume = {10}, +year = {2017} } -@article{Bozhikova2017, -abstract = {The term "quality software" refers to software that is easy to maintain and evolve. The presence of Anti-Patterns and Patterns is recognized as one of the effective ways to measure the quality of modern software systems. The paper presents an approach which supports the software analysis, development and maintenance, using techniques that generate the structure of Software Design Patterns, find Anti-Patterns in the code and perform Code Refactoring. The proposed approach is implemented in a software tool, which could support the real phases of software development and could be used for educational purposes, to support "Advanced Software Engineering" course.}, -author = {Bozhikova, Violeta and Stoeva, Mariana and Georgiev, Bozhidar and Nikolaeva, Dimitrichka}, -doi = {10.1109/ET.2017.8124337}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Bozhikova et al. - 2017 - Improving the software quality — an educational approach(2).pdf:pdf}, -isbn = {9781538617533}, -journal = {2017 26th International Scientific Conference Electronics, ET 2017 - Proceedings}, -keywords = {Software anti-patterns,Software design patterns,Software engineering,Software refactoring}, -pages = {1--4}, -title = {{Improving the software quality - An educational approach}}, -volume = {2017-Janua}, +@article{Lenarduzzi2017, +abstract = {Software maintenance has dramatically evolved in the last four decades, to cope with the continuously changing software development models, and programming languages and adopting increasingly advanced prediction models. In this work, we present the initial results of a Systematic Literature Review (SLR), highlighting the evolution of the metrics and models adopted in the last forty years.}, +author = {Lenarduzzi, Valentina and Sillitti, Alberto and Taibi, Davide}, +doi = {10.1109/ICSE-C.2017.122}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Analyzing{\_}Forty{\_}Years{\_}of{\_}Software{\_}Maintenance{\_}Models.pdf:pdf}, +isbn = {9781538615898}, +journal = {Proceedings - 2017 IEEE/ACM 39th International Conference on Software Engineering Companion, ICSE-C 2017}, +keywords = {Software Maintenance,Systematic Literature Review}, +pages = {146--148}, +publisher = {IEEE}, +title = {{Analyzing Forty years of software maintenance models}}, year = {2017} } @article{Mamone1994, @@ -462,129 +338,41 @@ @article{Mamone1994 month = {jan}, number = {1}, pages = {75--76}, -title = {{The IEEE standard for software maintenance}}, -volume = {19}, -year = {1994} -} -@article{Alenezi2016, -abstract = {See, stats, and : https : / / www . researchgate . net / publication / 296060207 Does Software ? Evidences - Source Article CITATIONS 0 READS 38 2 : Some : Develop (UX) Evaluation Architectural - Source Mamdouh Prince 32 SEE Mohammad . Zarour Prince 32 SEE All . Zarour . The . 
Throughout the software evolution, several maintenance actions such as adding new features, fixing problems, and improving the design might negatively or positively affect the software design quality. Quality degradation, if not handled at the right time, can accumulate and cause serious problems for future maintenance effort. Several researchers considered modularity as one of the success factors of Open Source Software (OSS) projects. The modularity of these systems is influenced by software metrics such as size, complexity, cohesion, and coupling. In this work, we study the modularity evolution of four open-source systems by answering two main research questions, namely: what measures can be used to measure the modularity level of software and, secondly, did the modularity level of the selected open source software improve over time. By investigating the modularity measures, we have identified the main measures that can be used to measure software modularity. Based on our analysis, the modularity of these two systems is not improving over time. However, the defect density is improving over time.},
-author = {Alenezi, Mamdouh and Zarour, Mohammad},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Alenezi, Zarour - 2016 - Does Software Structures Quality Improve over Software Evolution Evidences from Open - Source Projects.pdf:pdf},
-journal = {Special issue on “Computing Applications and Data Mining” International Journal of Computer Science and Information Security (IJCSIS)},
-number = {1},
-pages = {61--75},
-title = {{Does Software Structures Quality Improve over Software Evolution? Evidences from Open-Source Projects}},
-volume = {14},
-year = {2016}
-}
-@article{Razavi2008,
-abstract = {The component-based nature of large industrial software systems that consist of a number of diverse collaborating applications poses significant challenges with respect to system maintenance, monitoring, auditing, and diagnosing. In this context, a monitoring and diagnostic system interprets log data to recognize patterns of significant events that conform to specific Threat Models. Threat Models have been used by the software industry for analyzing and documenting a system's risks in order to understand a system's threat profile. In this paper, we propose a framework whereby patterns of significant events are represented as expressions of a specialized monitoring language that are used to annotate specific threat models. An approximate matching technique that is based on the Viterbi algorithm is then used to identify whether system-generated events fit the given patterns. The technique has been applied and evaluated considering threat models and monitoring policies in logs that have been obtained from multi-user MS-Windows{\textcopyright} based systems. {\textcopyright} 2008 IEEE.},
-author = {Razavi, Ali and Kontogiannis, Kostas},
-doi = {10.1109/COMPSAC.2008.81},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Razavi, Kontogiannis - 2008 - Pattern and policy driven log analysis for software monitoring.pdf:pdf},
-isbn = {9780769532622},
-issn = {07303157},
-journal = {Proceedings - International Computer Software and Applications Conference},
-pages = {108--111},
-title = {{Pattern and policy driven log analysis for software monitoring}},
-year = {2008}
}
-@article{Port2017,
-abstract = {NASA has been successfully sustaining the continuous operation of its critical navigation software systems for over 12 years. 
To accomplish this, NASA scientists must continuously monitor their process, report on current system quality, forecast maintenance effort, and sustain required staffing levels. This report presents some examples of the use of a robust software metrics and analytics program that enables actionable strategic maintenance management of a critical system (Monte) in a timely, economical, and risk-controlled fashion. This article is part of a special issue on Actionable Analytics for Software Engineering.},
-author = {Port, Dan and Taber, Bill},
-doi = {10.1109/MS.2017.4541055},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Port, Taber - 2017 - Actionable Analytics for Strategic Maintenance of Critical Software An Industry Experience Report.pdf:pdf},
-issn = {07407459},
-journal = {IEEE Software},
-keywords = {Monte,NASA,critical systems,navigation systems,reliability,software analytics,software development,software engineering,software maintenance},
-number = {1},
-pages = {58--63},
-publisher = {IEEE},
-title = {{Actionable Analytics for Strategic Maintenance of Critical Software: An Industry Experience Report}},
-volume = {35},
-year = {2017}
-}
-@article{Szendrei1990a,
-author = {Szendrei, Agnes},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Szendrei - 1990 - Simple surjective algebras having no proper subalgebras(2).pdf:pdf},
-isbn = {1446788700029},
-number = {1813},
-pages = {434--454},
-title = {{Simple surjective algebras having no proper subalgebras}},
-volume = {48},
-year = {1990}
-}
-@article{Al-Saiyd2015,
-author = {Al-Saiyd, Nedhal and Zriqat, Esraa},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/EJSR-AnalyzingtheimpactofRequirementChangingonSoftwareDesign.pdf:pdf},
-journal = {European Journal of Scientific Research},
-number = {February},
-title = {{Analyzing the Impact of Requirement Changing on Software Design}},
-volume = {136},
-year = {2015}
-}
-@article{Zhu2019,
-abstract = {Logs are imperative in the development and maintenance process of many software systems. They record detailed runtime information that allows developers and support engineers to monitor their systems and dissect anomalous behaviors and errors. The increasing scale and complexity of modern software systems, however, make the volume of logs explode. In many cases, the traditional way of manual log inspection becomes impractical. Many recent studies, as well as industrial tools, resort to powerful text search and machine learning-based analytics solutions. Due to the unstructured nature of logs, a first crucial step is to parse log messages into structured data for subsequent analysis. In recent years, automated log parsing has been widely studied in both academia and industry, producing a series of log parsers by different techniques. To better understand the characteristics of these log parsers, in this paper, we present a comprehensive evaluation study on automated log parsing and further release the tools and benchmarks for easy reuse. More specifically, we evaluate 13 log parsers on a total of 16 log datasets spanning distributed systems, supercomputers, operating systems, mobile systems, server applications, and standalone software. 
We report the benchmarking results in terms of accuracy, robustness, and efficiency, which are of practical importance when deploying automated log parsing in production. We also share the success stories and lessons learned in an industrial application at Huawei. We believe that our work could serve as the basis and provide valuable guidance to future research and deployment of automated log parsing.}, -archivePrefix = {arXiv}, -arxivId = {1811.03509}, -author = {Zhu, Jieming and He, Shilin and Liu, Jinyang and He, Pinjia and Xie, Qi and Zheng, Zibin and Lyu, Michael R.}, -doi = {10.1109/ICSE-SEIP.2019.00021}, -eprint = {1811.03509}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Zhu et al. - 2019 - Tools and Benchmarks for Automated Log Parsing.pdf:pdf}, -isbn = {9781728117607}, -journal = {Proceedings - 2019 IEEE/ACM 41st International Conference on Software Engineering: Software Engineering in Practice, ICSE-SEIP 2019}, -keywords = {AIOps,anomaly detection,log analysis,log management,log parsing}, -pages = {121--130}, -publisher = {IEEE}, -title = {{Tools and Benchmarks for Automated Log Parsing}}, -year = {2019} -} -@article{Kherbouche2017, -abstract = {It is widely observed that the poor event logs quality poses a significant challenge to the process mining project both in terms of choice of process mining algorithms and in terms of the quality of the discovered process model. Therefore, it is important to control the quality of event logs prior to conducting a process mining analysis. In this paper, we propose a qualitative model which aims to assess the quality of event logs before applying process mining algorithms. Our ultimate goal is to give process mining practitioners an overview of the quality of event logs which can help to indicate whether the event log quality is good enough to proceed to process mining and in this case, to suggest both the needed preprocessing steps and the process mining algorithm that is most tailored under such a circumstance. The qualitative model has been evaluated using both artificial and real-life case studies.}, -author = {Kherbouche, Mohammed Oussama and Laga, Nassim and Masse, Pierre Aymeric}, -doi = {10.1109/SSCI.2016.7849946}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Kherbouche, Laga, Masse - 2017 - Towards a better assessment of event logs quality.pdf:pdf}, -isbn = {9781509042401}, -journal = {2016 IEEE Symposium Series on Computational Intelligence, SSCI 2016}, -keywords = {event logs,process mining,process mining algorithms,qualitative model}, -publisher = {IEEE}, -title = {{Towards a better assessment of event logs quality}}, -year = {2017} +title = {{The IEEE standard for software maintenance}}, +volume = {19}, +year = {1994} } -@article{Event1800, -author = {Event, Common}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Song, Luo, Chen - 2008 - Behavior pattern mining Apply process mining technology to common event logs of information systems.pdf:pdf}, -isbn = {978-1-4244-1685-1}, -journal = {Event (London)}, -pages = {1800--1805}, -title = {{Behavior Pattern Mining: Apply Process Mining Technology}}, -year = {1800} +@misc{EventBubbling, +author = {JavaScript.Info}, +title = {{Bubbling and capturing}}, +url = {https://javascript.info/bubbling-and-capturing{\#}capturing}, +urldate = {2022-08-14} } -@article{Charette1997, -abstract = {Risk management in maintenance differs in major ways from risk management in development. 
Risk opportunities are more frequent, risks come from more diverse sources, and projects have less freedom to act on them. The authors describe how they dealt with these differences in a large US Navy software maintenance organization.}, -author = {Charette, Robert N. and Adams, Kevin Mac G. and White, Mary B.}, -doi = {10.1109/52.589232}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Charette, Adams, White - 1997 - Managing risk in software maintenance.pdf:pdf}, -issn = {07407459}, -journal = {IEEE Software}, -number = {3}, -pages = {43--50}, -title = {{Managing risk in software maintenance}}, -volume = {14}, -year = {1997} +@article{Bounnady2016, +abstract = {Nowadays, web applications play an important role for many organizations, and there are many technologies variable in the market and each technology have its own advantage, so choosing the technologies is one important factor. This research studies of performance processing speed of two common technologies for developing web applications namely PHP and ASP.NET. These technologies run on environment as Windows operating system where ASP.NET using IIS will be compared with PHP using IIS as well as the PHP using Apache. The comparison is conducted through five approaches including webpage loading; algorithm processing; database managing; file(s) uploading and reading/writing external file(s). The results from our research demonstrate as ASP.NET is more effective than PHP in various areas such as webpage loading (1.81 times faster), external file(s) reading/writing (3.77 times faster) and Algorithm calculation (Hanoi Tower 14.74 times faster). However, PHP can operating more efficiently in some other areas such as variable datatypes exchanging (adding big numbers PHP is 6.82 times faster), database managing (PHP is 1.45 times faster) and files uploading (PHP is 1.17 times faster).}, +author = {Bounnady, Khampheth and Phanthavong, Khampaseuth and Pathoumvanh, Somsanouk and Sihalath, Keokanlaya}, +doi = {10.1109/ECTICon.2016.7561484}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Bounnady et al. - 2016 - Comparison the processing speed between PHP and ASP.NET.pdf:pdf}, +isbn = {9781467397490}, +journal = {2016 13th International Conference on Electrical Engineering/Electronics, Computer, Telecommunications and Information Technology, ECTI-CON 2016}, +keywords = {ASP.Net,performance comparison,php,processing speed,server site scripting}, +pages = {0--4}, +publisher = {IEEE}, +title = {{Comparison the processing speed between PHP and ASP.NET}}, +year = {2016} } -@article{Rehman2018, -abstract = {Agile methodologies gained fame due to the fact of producing high-quality software systems. Maintenance effort is almost more than half of the total effort invested in any software system during its lifespan. A well-discussed issue within the community of researchers and engineers is how to use agile methodologies for maintaining the developed software because agile software development life cycle doesn't have the specifically planned mechanism for maintenance. 
To bridge this gap, we used the theoretical and empirical technique to formulate factors that should be followed during the agile maintenance including planning for the maintenance; the on-site customer should be present, iterative maintenance, documentation update after each phase and maintenance should be testable.}, -author = {Rehman, Fateh Ur and Maqbool, Bilal and Riaz, Muhammad Qasim and Qamar, Usman and Abbas, Muhammad}, -doi = {10.1109/NCG.2018.8593152}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Rehman et al. - 2018 - Scrum Software Maintenance Model Efficient Software Maintenance in Agile Methodology.pdf:pdf}, -isbn = {9781538641095}, -journal = {21st Saudi Computer Society National Computer Conference, NCC 2018}, -keywords = {Agile,Agile Maintenance,Maintenance Sprints,Scrum,Scrum Maintenance,Software Maintenance}, -pages = {7--11}, +@article{Reimanis2016, +abstract = {Providing software developers and researchers with useful technical debt analysis tools is an instrumental outcome of software engineering and technical debt research. Such tools aggregate industry best practices to provide users with organized and quantifiable metrics that can help minimize the time it takes to synthesize and make an intelligent future decision regarding a system. Today, most tools rely primarily on structural measurements from static analysis to generate results. However, it is also necessary to consider measurements that capture the behavior of software, as these represent additional complexities within a system that structural measurements are incapable of detecting. Herein, we present our position, that more effort needs to be placed towards understanding software behavior so that technical debt analysis tools can begin supporting them, in order to provide tool users with a more accurate and complete view of their system. In this paper, we describe this problem in the context of design patterns and outline an effective method to talk about behaviors in the future. We create and classify two example behaviors using our method, both of which increase the technical debt in their respective design pattern applications.}, +author = {Reimanis, Derek and Izurieta, Clemente}, +doi = {10.1109/MTD.2016.13}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Reimanis, Izurieta - 2016 - Towards Assessing the Technical Debt of Undesired Software Behaviors in Design Patterns.pdf:pdf}, +isbn = {9781509038541}, +journal = {Proceedings - 2016 IEEE 8th International Workshop on Managing Technical Debt, MTD 2016}, +keywords = {design patterns,softare behavior,software architecture,technical debt}, +pages = {24--27}, publisher = {IEEE}, -title = {{Scrum Software Maintenance Model: Efficient Software Maintenance in Agile Methodology}}, -year = {2018} +title = {{Towards Assessing the Technical Debt of Undesired Software Behaviors in Design Patterns}}, +year = {2016} } @article{Shah2012, abstract = {Context: Defects are an ineludible component of software, Defect Density (DD) - defined as the number of defects divided by size - is often used as a related measure of quality. Project managers and researchers alike would benefit a lot from overview DD figures from software projects, the former for decision making the latter for state-of-the-practice assessment. Objective: In this paper, we collect and aggregate DD figures published in literature, in addition we characterize DD as a function of different project factors in terms of central tendency and dispersion. 
The factors considered include development mode - open vs. closed source - , programming language, size, and age. Results: We were able to identify 19 papers reporting defect density figures concerning 109 software projects. The mean DD for the studied sample of projects is 7.47 post release defects per thousand lines of code (KLoC), the median is 4.3 with a standard deviation of 7.99. Development mode, is characterized by statistically meaningful different DD, the same for Java vs. C. Besides, in the studied sample large projects exhibited lower DD than medium and small projects. Conclusion: The study is a first step in collecting and analyzing DD figures for the purpose of characterizing one important aspect of software quality. These figures can be used both by researchers and project managers interested to evaluate their projects. Further work is needed to extend the data set and to identify predictors of defect density. {\textcopyright} 2012 IEEE.}, @@ -601,30 +389,142 @@ @article{Shah2012 volume = {1}, year = {2012} } -@article{Zhuo1993, -abstract = {Software metrics are used to quantitatively characterize the essential features of software. The paper investigates the use of metrics in assessing software maintainability by presenting and comparing seven software maintainability assessment models. Eight software systems were used for initial construction and calibrating the automated assessment models, and an additional six software systems were used for testing the results. A comparison was made between expert software engineers' subjective assessment of the 14 individual software systems and the maintainability indices calculated by the seven models based on complexity metrics automatically derived from those systems. Initial tests show very high correlations between the automated assessment techniques and the subjective expert evaluations.}, -author = {Zhuo, Fang and Lowther, Bruce and Oman, Paul and Hagemeister, Jack}, -doi = {10.1109/METRIC.1993.263800}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Constructing{\_}and{\_}testing{\_}software{\_}maintainability{\_}assessment{\_}models.pdf:pdf;:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Zhuo et al. - 1993 - Constructing and testing software maintainability assessment models.pdf:pdf}, -isbn = {0818637404}, -journal = {Proceedings - 1st International Software Metrics Symposium, METRIC 1993}, -pages = {61--70}, -title = {{Constructing and testing software maintainability assessment models}}, -year = {1993} +@article{Waqar2017, +abstract = {Tracking users' posting activities in online classified ads and understanding the dynamics of their behavior is a topic of great importance with many implications. However, some of the underlying problems associated with modeling users and detecting their behavioral changes due to temporal and spatial variations have not been well-studied. In this paper, we develop a probabilistic model of user behavior based on the ads the user posts and the categories in which the ads are posted. The model can track some of the temporal changes in behavior, as revealed by our experiments on two classes of users monitored over a period of almost a year. We study the association between post categories and user groups, and show how temporal and seasonal changes can be detected. 
We further investigate a generative model for ad posts, based on user locations, and provide some evidence showing that the model is promising and that some interesting relationships can be identified.}, +author = {Waqar, Muhammad and Rafiei, Davood}, +doi = {10.1109/WI.2016.0088}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Waqar, Rafiei - 2017 - Tracking User Activities and Marketplace Dynamics in Classified Ads.pdf:pdf}, +isbn = {9781509044702}, +journal = {Proceedings - 2016 IEEE/WIC/ACM International Conference on Web Intelligence, WI 2016}, +keywords = {Classified ads,Temporal analysis,User modeling,User tracking}, +pages = {522--525}, +publisher = {IEEE}, +title = {{Tracking User Activities and Marketplace Dynamics in Classified Ads}}, +year = {2017} } -@article{Gu2010, -abstract = {Introduced WebForms, MVC and MVP architecture works under the NET platform,,the characteristics of the three structures were analyzed and compared, especially described WebForms's ViewState, the performance and client ID pollution problems and give solutions; combing their respective advantages, noted suitable scenarios of three kinds framework, aiming to supply reference for developing Web system. {\textcopyright}2010 IEEE.}, -author = {Gu, Ming Xia and Tang, Keming}, -doi = {10.1109/ESIAT.2010.5567323}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Comparative{\_}analysis{\_}of{\_}WebForms{\_}MVC{\_}and{\_}MVP{\_}architecture.pdf:pdf}, -isbn = {9781424473885}, -journal = {2010 2nd Conference on Environmental Science and Information Application Technology, ESIAT 2010}, -keywords = {ASP.NET,MVC,MVP,Software architecture,Web system}, -pages = {391--394}, -title = {{Comparative analysis of WebForms MVC and MVP architecture}}, +@article{Li2018, +abstract = {This is an extended abstract of a paper published in the Empirical Software Engineering journal. The original paper is communicated by Mark Grechanik. The paper empirically studied how developers assign log levels to their logging statements and proposed an automated approach to help developers determine the most appropriate log level when they add a new logging statement. We analyzed the development history of four open source projects (Hadoop, Directory Server, Hama, and Qpid). We found that our automated approach can accurately suggest the levels of logging statements with an AUC of 0.75 to 0.81. We also found that the characteristics of the containing block of a newly-added logging statement, the existing logging statements in the containing source code file, and the content of the newly-added logging statement play important roles in determining the appropriate log level for that logging statement.}, +author = {Li, Heng and Shang, Weiyi and Hassan, Ahmed E.}, +doi = {10.1109/SANER.2018.8330234}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Li, Shang, Hassan - 2018 - Which log level should developers choose for a new logging statement (journal-first abstract).pdf:pdf}, +isbn = {9781538649695}, +journal = {25th IEEE International Conference on Software Analysis, Evolution and Reengineering, SANER 2018 - Proceedings}, +keywords = {log level,ordinal regression model,software logging}, +number = {4}, +pages = {468}, +publisher = {IEEE}, +title = {{Which log level should developers choose for a new logging statement? (journal-first abstract)}}, +volume = {2018-March}, +year = {2018} +} +@article{Harkness2007, +abstract = {The latest version of Microsoft Visual Basic (2005) is built upon the Microsoft.NET Framework. 
It has finally become a fully-fledged Object Oriented Language with all the associated features one would come to expect. It allows Visual Basic programmers to tackle much larger applications, through improved scalability and reusability. This article discusses the new features using code examples to real applications in the Laboratory Automation environment. {\textcopyright} 2007, Society for Laboratory Automation and Screening. All rights reserved.}, +author = {Harkness, Robert and Crook, Malcolm and Povey, David}, +doi = {10.1016/j.jala.2006.10.014}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/HARKNESS, CROOK, POVEY - 2007 - Programming Review of Visual Basic.NET for the Laboratory Automation Industry(2).pdf:pdf}, +issn = {15402452}, +journal = {Journal of Laboratory Automation}, +keywords = {.NET,laboratory automation,object oriented,visual basic}, +number = {1}, +pages = {25--32}, +title = {{Programming Review of Visual Basic.NET for the Laboratory Automation Industry}}, +volume = {12}, +year = {2007} +} +@article{Gralha2018, +abstract = {We use Grounded Theory to study the evolution of requirements practices of 16 software startups as they grow and introduce new products and services. These startups operate in a dynamic environment, with significant time and market pressure, and rarely have time for systematic requirements analysis. Our theory describes the evolution of practice along six dimensions that emerged as relevant to their requirements activities: requirements artefacts, knowledge management, requirements-related roles, planning, technical debt and product quality. Beyond the relationships among the dimensions, our theory also explains the turning points that drove the evolution along these dimensions. These changes are reactive, rather than planned, suggesting an overall pragmatic lightness, i.e., flexibility, in the startups' evolution towards engineering practices for requirements. Our theory organises knowledge about evolving requirements practice in maturing startups, and provides practical insights for startups' assessing their own evolution as they face challenges to their growth. Our research also suggests that a startup's evolution along the six dimensions is not fundamental to its success, but has significant effects on their product, their employees and the company.}, +author = {Gralha, Catarina and Damian, Daniela and Wasserman, Anthony I.Tony and Goul{\~{a}}o, Miguel and Ara{\'{u}}jo, Jo{\~{a}}o}, +doi = {10.1145/3180155.3180158}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Gralha et al. - 2018 - The evolution of requirements practices in software startups.pdf:pdf}, +isbn = {9781450356381}, +issn = {02705257}, +journal = {Proceedings - International Conference on Software Engineering}, +keywords = {Evolution,Grounded theory,Requirements engineering,Startups}, +pages = {823--833}, +title = {{The evolution of requirements practices in software startups}}, +year = {2018} +} +@article{Paliouras1999, +abstract = {The World Wide Web has become a major source of information that can be turned into valuable knowledge for individuals and organisations. In the work presented here, we are concerned with the extraction of meta-knowledge from the Web. In particular, knowledge about Web usage which is invaluable to the construction of Web sites that meet their purposes and prevent disorientation. Towards this goal, we propose the organisation of the users of a Web site into groups with common navigational behaviour (user communities). 
We view the task of building user communities as a data mining task, searching for interesting patterns within a database. The database that we use in our experiments consists of access logs collected from the Web site of the Advanced Course on Artificial Intelligence 1999. The unsupervised machine learning algorithm COBWEB is used to organise the users of the site, who follow similar paths, into a small set of communities. Particular attention is paid to the interpretation of the communities that are generated through this process. For this purpose, we use a simple metric to identify the representative navigational behaviour for each community. This information can then be used by the administrators of the site to re-organise it in a way that is tailored to the needs of each community. The proposed Web usage analysis is much more insightful than the common approach of examining simple usage statistics of the Web site.},
+author = {Paliouras, Georgios and Papatheodorou, Christos and Karkaletsis, Vangelis and Spyropoulos, Constantine and Tzitziras, Panayiotis},
+doi = {10.1109/icsmc.1999.825226},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/From{\_}Web{\_}usage{\_}statistics{\_}to{\_}Web{\_}usage{\_}analysis.pdf:pdf},
+isbn = {0780357310},
+issn = {08843627},
+journal = {Proceedings of the IEEE International Conference on Systems, Man and Cybernetics},
+pages = {159--164},
+title = {{From Web usage statistics to Web usage analysis}},
 volume = {2},
+year = {1999}
}
+@article{Rahmani2010,
+abstract = {Open source software (OSS) development is considered an effective approach to ensuring acceptable levels of software quality. One facet of quality improvement involves the detection of potential relationship between defect density and other open source software metrics. This paper presents an empirical study of the relationship between defect density and download number, software size and developer number as three popular repository metrics. This relationship is explored by examining forty-four randomly selected open source software projects retrieved from SourceForge.net. By applying simple and multiple linear regression analysis, the results reveal a statistically significant relationship between defect density and number of developers and software size jointly. However, despite theoretical expectations, no significant relationship was found between defect density and number of downloads in OSS projects. {\textcopyright} 2010 IEEE.},
+author = {Rahmani, Cobra and Khazanchi, Deepak},
+doi = {10.1109/ICIS.2010.11},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Rahmani, Khazanchi - 2010 - A study on defect density of open source software(2).pdf:pdf},
+isbn = {9780769541471},
+journal = {Proceedings - 9th IEEE/ACIS International Conference on Computer and Information Science, ICIS 2010},
+keywords = {Bug repository,Defect density,Linear regression,Open source software,Software repository metric},
+pages = {679--683},
+publisher = {IEEE},
+title = {{A study on defect density of open source software}},
 year = {2010}
}
+@article{Ackermann2009,
+abstract = {In this paper, we analyze software that we inherited from another party. We analyze its architecture and use common design principles to identify critical changes in order to improve its flexibility with respect to a set of planned extensions. We describe flexibility issues that we encountered and how they were addressed by a redesign and re-implementation.
The study shows that basic and well-established design concepts can be used to guide the design and redesign of software. {\textcopyright} 2009 IEEE.},
+author = {Ackermann, Christopher and Lindvall, Mikael and Dennis, Greg},
+doi = {10.1109/CSMR.2009.60},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Ackermann, Lindvall, Dennis - 2009 - Redesign for flexibility and maintainability a case study.pdf:pdf;:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Redesign{\_}for{\_}Flexibility{\_}and{\_}Maintainability{\_}A{\_}Case{\_}Study.pdf:pdf},
+isbn = {978-1-4244-3755-9},
+issn = {15345351},
+journal = {2009 13th European Conference on Software Maintenance and Reengineering},
+month = {mar},
+pages = {259--262},
+publisher = {IEEE},
+title = {{Redesign for flexibility and maintainability: a case study}},
+year = {2009}
}
+@article{Locke2021,
+abstract = {Logs contain valuable information about the runtime behaviors of software systems. Thus, practitioners rely on logs for various tasks such as debugging, system comprehension, and anomaly detection. However, due to the unstructured nature and large size of logs, there are several challenges that practitioners face with log analysis. In this paper, we propose a novel approach called LogAssist that tackles these challenges and assists practitioners with log analysis. LogAssist provides an organized and concise view of logs by first grouping logs into event sequences (i.e., workflows), which better illustrate the system runtime execution paths. Then, LogAssist compresses the log events in workflows by hiding consecutive events and applying n-gram modeling to identify common event sequences. We evaluated LogAssist on the logs that are generated by two open-source and one enterprise system. We find that LogAssist can reduce the number of log events that practitioners need to investigate by up to 99{\%}. Through a user study with 19 participants, we also find that LogAssist can assist practitioners by reducing the needed time on log analysis tasks by an average of 40{\%}. The participants also rated LogAssist an average of 4.53 out of 5 for improving their experiences of performing log analysis. Finally, we document our experiences and lessons learned from developing and adopting LogAssist in practice. We believe that LogAssist and our reported experiences may lay the basis for future analysis and interactive exploration on logs.},
+author = {Locke, Steven and Li, Heng and Chen, Tse Hsun Peter and Shang, Weiyi and Liu, Wei},
+doi = {10.1109/TSE.2021.3083715},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/LogAssist{\_}Assisting{\_}Log{\_}Analysis{\_}Through{\_}Log{\_}Summarization.pdf:pdf},
+issn = {19393520},
+journal = {IEEE Transactions on Software Engineering},
+keywords = {Anomaly detection,Faces,Log analysis,Runtime,Software systems,Task analysis,Testing,Tools,log abstraction,log compression,log reduction,n-gram modeling,workflow characterization},
+number = {9},
+pages = {3227--3241},
+publisher = {IEEE},
+title = {{LogAssist: Assisting Log Analysis Through Log Summarization}},
+volume = {48},
+year = {2021}
}
+@article{Szendrei1990,
+abstract = {We prove that every finite, simple, surjective algebra having no proper subalgebras is either quasiprimal or affine or isomorphic to an algebra term equivalent to a matrix power of a unary permutational algebra. Consequently, it generates a minimal variety if and only if it is quasiprimal.
We show also that a locally finite, minimal variety omitting type 1 is minimal as a quasivariety if and only if it has a unique subdirectly irreducible algebra. {\textcopyright} 1990, Australian Mathematical Society. All rights reserved.},
+author = {Szendrei, {\'{A}}gnes},
+doi = {10.1017/S1446788700029979},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Simple{\_}Surjective{\_}Algebras{\_}Having{\_}no{\_}Proper{\_}Subalg.pdf:pdf},
+isbn = {1446788700029},
+issn = {14468107},
+journal = {Journal of the Australian Mathematical Society},
+number = {3},
+pages = {434--454},
+title = {{Simple Surjective Algebras Having no Proper Subalgebras}},
+volume = {48},
+year = {1990}
}
+@article{Yao2009,
+abstract = {In order to find out the user patterns that hide in web logs, log mining technology is one of the best ways. Log mining is the usage of data mining in the field of web server' logs. Although there are a set of softwares which can be used to analysis web logs, the algorithm raised in this article pay special attention to discover the relationship among all the pages of the web site. In this algorithm, size-link radio and static inner-link degree was creative used. According to the result of experiment, this algorithm can exactly find out the correlative ones among massive pages.},
+author = {Yao, Lei Yue and Xiong, Jian Ying},
+doi = {10.1109/GRC.2009.5255028},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/The{\_}research{\_}and{\_}implementation{\_}of{\_}a{\_}correlative{\_}degree{\_}mining{\_}algorithm{\_}based{\_}on{\_}IIS{\_}logs.pdf:pdf},
+isbn = {9781424448319},
+journal = {2009 IEEE International Conference on Granular Computing, GRC 2009},
+keywords = {IIS log analysis,Web log analysis,Web usage mining},
+pages = {706--709},
+title = {{The research and implementation of a correlative degree mining algorithm based on IIS logs}},
+year = {2009}
}
 @article{Ogheneovo2014,
 abstract = {As software becomes more and more complex due to increased number of module size, procedure size, and branching complexity, software maintenance costs are often on the increase. Consider a software such as Windows 2000 operating systems with over 29 million lines of code (LOC), 480,000 pages if printed, a stack of paper 161 feet high, estimate of 63,000 bugs in the software when it was first released [1] and with over 1000 developers, there is no doubt that such a large and complex software will require large amount of money (in US Dollars), social and environmental factors to maintain it. It has been estimated that over 70{\%} of the total costs of software development process is expended on maintenance after the software has been delivered. This paper studies the relationship between software complexity and maintenance cost, the factors responsible for software complexity and why maintenance costs increase with software complexity. Some data collected on Windows, Debian Linux, and Linux Kernel operating systems were used. The results of our findings show that there is a strong correlation between software complexity and maintenance costs. That is, as lines of code increase, the software becomes more complex and more bugs may be introduced, and hence the cost of maintaining software increases.},
 author = {Ogheneovo, Edward E.},
@@ -639,120 +539,171 @@ @article{Ogheneovo2014
 volume = {02},
 year = {2014}
 }
-@article{Vijayasarathy2016,
-abstract = {Organizations can choose from software development methodologies ranging from traditional to agile approaches.
Researchers surveyed project managers and other team members about their choice of methodologies. The results indicate that although agile methodologies such as Agile Unified Process and Scrum are more prevalent than 10 years ago, traditional methodologies, including the waterfall model, are still popular. Organizations are also taking a hybrid approach, using multiple methodologies on projects. Furthermore, their choice of methodologies is associated with certain organizational, project, and team characteristics.},
-author = {Vijayasarathy, Leo R. and Butler, Charles W.},
-doi = {10.1109/MS.2015.26},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Vijayasarathy, Butler - 2016 - Choice of Software Development Methodologies Do Organizational, Project, and Team Characteristics Matter.pdf:pdf},
+@article{Enoiu2020,
+abstract = {Software testing is a complex, intellectual activity based (at least) on analysis, reasoning, decision making, abstraction and collaboration performed in a highly demanding environment. Naturally, it uses and allocates multiple cognitive resources in software testers. However, while a cognitive psychology perspective is increasingly used in the general software engineering literature, it has yet to find its place in software testing. To the best of our knowledge, no theory of software testers' cognitive processes exists. Here, we take the first step towards such a theory by presenting a cognitive model of software testing based on how problem solving is conceptualized in cognitive psychology. Our approach is to instantiate a general problem solving process for the specific problem of creating test cases. We then propose an experiment for testing our cognitive test design model. The experiment makes use of verbal protocol analysis to understand the mechanisms by which human testers choose, design, implement and evaluate test cases. An initial evaluation was then performed with five software engineering master students as subjects. The results support a problem solving-based model of test design for capturing testers' cognitive processes.},
+archivePrefix = {arXiv},
+arxivId = {2007.08927},
+author = {Enoiu, Eduard and Tukseferi, Gerald and Feldt, Robert},
+doi = {10.1109/QRS-C51114.2020.00053},
+eprint = {2007.08927},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Towards{\_}a{\_}Model{\_}of{\_}Testers{\_}Cognitive{\_}Processes{\_}Software{\_}Testing{\_}as{\_}a{\_}Problem{\_}Solving{\_}Approach.pdf:pdf},
+isbn = {9781728189154},
+journal = {Proceedings - Companion of the 2020 IEEE 20th International Conference on Software Quality, Reliability, and Security, QRS-C 2020},
+keywords = {behavioral software testing,cognitive model,cognitive processes,cognitive psychology,exploratory testing,human aspects,human based software testing,manual test design,problem solving,software testing,test creation,test design,verbal protocol,verbal protocol analysis},
+pages = {272--279},
+title = {{Towards a Model of Testers' Cognitive Processes: Software Testing as a Problem Solving Approach}},
+year = {2020}
}
+@article{Niu2018,
+abstract = {This article summarizes the RE in the Age of Continuous Deployment panel at the 25th IEEE International Requirements Engineering Conference.
It highlights two synergistic points (user stories and linguistic tooling) and one challenge (nonfunctional requirements) in fast-paced, agile-like projects, and recommends how to carry on the dialogue.},
+author = {Niu, Nan and Brinkkemper, Sjaak and Franch, Xavier and Partanen, Jari and Savolainen, Juha},
+doi = {10.1109/MS.2018.1661332},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Niu et al. - 2018 - Requirements engineering and continuous deployment.pdf:pdf},
 issn = {07407459},
 journal = {IEEE Software},
+keywords = {25th IEEE International Requirements Engineering C,RE in the Age of Continuous Deployment,agile software development,continuous deployment,linguistic tooling,nonfunctional requirements,requirements engineering,software development,software engineering,software requirements,user stories},
+number = {2},
+pages = {86--90},
 publisher = {IEEE},
+title = {{Requirements engineering and continuous deployment}},
+volume = {35},
+year = {2018}
}
+@article{Tian2017,
+abstract = {A web service reliability test method for C/S architecture software based on log analysis is presented in this paper. In this method, the software usage model is constructed automatically to describe the real situation on the users' access to the web service by Markov chain. The test cases are generated according to Random Walk and applied to software reliability test. In the experiment process, MTBF (focusing on server crash) was chosen to be the software reliability evaluation index.
Through the testing and analysis of a real web software, MTBF obtained by testing result is similar to that from the realistic log, and the web service reliability test method is validated.},
+author = {Tian, Xuetao and Li, Honghui and Liu, Feng},
+doi = {10.1109/QRS-C.2017.38},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Tian, Li, Liu - 2017 - Web Service Reliability Test Method Based on Log Analysis.pdf:pdf},
 isbn = {9781538620724},
 journal = {Proceedings - 2017 IEEE International Conference on Software Quality, Reliability and Security Companion, QRS-C 2017},
+keywords = {Log Analysis,Markov Usage Model,Reliability Test,Test Cases,Web Service},
+pages = {195--199},
 publisher = {IEEE},
+title = {{Web Service Reliability Test Method Based on Log Analysis}},
 year = {2017}
}
+@article{Ganapathi,
+author = {Ganapathi, Archana},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Ganapathi, Stanford - Unknown - Failure Analysis of Internet Services.pdf:pdf},
+pages = {1--17},
+title = {{Failure Analysis of Internet Services}}
}
+@article{Hasan2012,
+abstract = {Software maintenance constitutes a critical function that enables organizations to continually leverage their information technology (IT) capabilities. Despite the growing importance of small organizations, a majority of the software maintenance guidelines are inherently geared toward large organizations. Literature review and case-based empirical studies show that in small organizations software maintenance processes are carried out without following a systemic process. Rather, they rely on ad-hoc and heuristics methods by organizations and individuals. This paper investigates software maintenance practices in a small information systems organization to come up with the nature and categories of heuristics used that successfully guided the software maintenance process. Specifically, this paper documents a set of best practices that small organizations can adopt to facilitate their software maintenance processes in the absence of maintenance-specific guidelines based on preliminary empirical investigation.},
+author = {Hasan, Raza and Chakraborty, Suranjan and Dehlinger, Josh},
+doi = {10.1007/978-3-642-23202-2_9},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Hasan, Chakraborty, Dehlinger - 2012 - Examining software maintenance processes in small organizations Findings from a case study.pdf:pdf},
+isbn = {9783642232015},
+issn = {1860949X},
+journal = {Studies in Computational Intelligence},
+keywords = {Small organizations,ad-hoc process,case study,cognitive heuristics,software maintenance},
+pages = {129--143},
+title = {{Examining software maintenance processes in small organizations: Findings from a case study}},
+volume = {377},
+year = {2012}
}
+@inproceedings{Thamburaj2017,
+author = {Thamburaj, T Francis and Aloysius, A.},
+booktitle = {2017 World Congress on Computing and Communication Technologies (WCCCT)},
+doi = {10.1109/WCCCT.2016.54},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Thamburaj, Aloysius - 2017 - Models for Maintenance Effort Prediction with Object-Oriented Cognitive Complexity Metrics.pdf:pdf},
+isbn = {978-1-5090-5573-9},
+keywords = {cognitive complexity,maintenance effort prediction,metrics,object-oriented metrics,software maintenance},
+month = {feb},
+pages = {191--194},
+publisher = {IEEE},
+title = {{Models for Maintenance Effort Prediction with Object-Oriented Cognitive Complexity Metrics}},
+year = {2017}
}
-@techreport{Kitchenham2007,
-abstract = {The objective of this report is to propose comprehensive guidelines for systematic literature reviews appropriate for software engineering researchers, including PhD students. A systematic literature review is a means of evaluating and interpreting all available research relevant to a particular research question, topic area, or phenomenon of interest. Systematic reviews aim to present a fair evaluation of a research topic by using a trustworthy, rigorous, and auditable methodology. The guidelines presented in this report were derived from three existing guidelines used by medical researchers, two books produced by researchers with social science backgrounds and discussions with researchers from other disciplines who are involved in evidence-based practice. The guidelines have been adapted to reflect the specific problems of software engineering research. The guidelines cover three phases of a systematic literature review: planning the review, conducting the review and reporting the review. They provide a relatively high level description. They do not consider the impact of the research questions on the review procedures, nor do they specify in detail the mechanisms needed to perform meta-analysis.},
-author = {Kitchenham, B.A. and Charters, S},
-booktitle = {Technical Report EBSE 2007- 001.
Keele University and Durham University Joint Report},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Kitchenham-2007Systematicreviews5-8.pdf:pdf},
-institution = {Keele University},
-number = {January},
-title = {{Guidelines for performing Systematic Literature Reviews in Software Engineering (Software Engineering Group, Department of Computer Science, Keele {\ldots}}},
-year = {2007}
}
-@article{Indonesia2019,
-author = {Indonesia, Universitas and Xplore, Ieee},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Indonesia, Xplore - 2019 - INTERNATIONAL STANDARD ISO IEC IEEE Systems and software engineering — management.pdf:pdf},
-title = {{INTERNATIONAL STANDARD ISO / IEC / IEEE Systems and software engineering — management}},
-volume = {2019},
-year = {2019}
}
-@article{Ren2011,
-abstract = {Software maintenance is an important stage of software life cycle, according to the problems of software maintenance process model, research software maintenance process model, focus on software after delivery to retire between the software maintenance activities. Firstly, descript of the eight software maintenance process; then, research software maintenance process model of the structure and content, including the rapid change model, Boehm model, IEEE model, iterative enhancement four models; finally, analyze to the four models of software maintenance process, provide the basis for the selection of the model.
This study's content includes the improvement of software maintenance process, guide maintenance activities, improve the quality of software maintenance, ensure the normal application software, which has important theoretical and practical significance. {\textcopyright} 2011 IEEE.},
-author = {Ren, Yongchang and Tao, Xing and Liu, Zhongjing and Chen, Xiaoji},
-doi = {10.1109/ICIII.2011.324},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Yongchang et al. - 2011 - Software Maintenance Process Model and Contrastive Analysis.pdf:pdf},
-isbn = {9780769545233},
-journal = {Proceedings - 2011 4th International Conference on Information Management, Innovation Management and Industrial Engineering, ICIII 2011},
-keywords = {Comparative analysis,Maintenance process,Process model,Software maintenance},
-pages = {169--172},
-publisher = {IEEE},
-title = {{Software maintenance process model and contrastive analysis}},
-volume = {3},
-year = {2011}
}
-@article{Hasiloglu2018,
-abstract = {Personal data have been compiled and harnessed by a great number of establishments to execute their legal activities. Establishments are legally bound to maintain the confidentiality and security of personal data. Hence it is a requirement to provide access logs for the personal information. Depending on the needs and capacity, personal data can be opened to the users via platforms such as file system, database and web service. Web service platform is a popular alternative since it is autonomous and can isolate the data source from the user. In this paper, the way to log personal data accessed via web service method has been discussed. As an alternative to classical method in which logs were recorded and saved by client applications, a different mechanism of forming a central audit log with API manager has been investigated. By forging a model policy to exemplify central logging method, its advantages and disadvantages have been explored. It has been concluded in the end that this model could be employed in centrally recording audit logs.},
-author = {Hasiloglu, Abdulsamet and Bali, Abdulkadir},
-doi = {10.1109/ISDFS.2018.8355333},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Hasiloglu, Bali - 2018 - Central audit logging mechanism in personal data web services.pdf:pdf},
-isbn = {9781538634493},
-journal = {6th International Symposium on Digital Forensic and Security, ISDFS 2018 - Proceeding},
-keywords = {API,API Policy,Audit Logging,Personal Data,Web Service},
-pages = {1--3},
-title = {{Central audit logging mechanism in personal data web services}},
-volume = {2018-Janua},
-year = {2018}
}
+@article{Dalpiaz2018,
+abstract = {90{\%} of agile practitioners employ user stories for capturing requirements.
Of these, 70{\%} follow a simple template when creating user stories: As a {\textless}role{\textgreater} I want to {\textless}action{\textgreater}, [so that {\textless}benefit{\textgreater}]. User stories' popularity among practitioners and their simple yet strict structure make them ideal candidates for automatic reasoning based on natural language processing. In our research, we have found that circa 50{\%} of real-world user stories contain easily preventable errors that may endanger their potential. To alleviate this problem, we have created methods, theories and tools that support creating better user stories. This tutorial combines our previous work into a pipeline for working with user stories: (1) The basics of creating user stories, and their use in requirements engineering; (2) How to improve user story quality with the Quality User Story Framework and AQUSA tool; and (3) How to generate conceptual models from user stories using the Visual Narrator and the Interactive Narrator tools. Our toolset is demonstrated with results obtained from 20+ software companies employing user stories.},
+author = {Dalpiaz, Fabiano and Brinkkemper, Sjaak},
+doi = {10.1109/RE.2018.00075},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Dalpiaz, Brinkkemper - 2018 - Agile requirements engineering with user stories.pdf:pdf;:C$\backslash$:/Users/cscheepers/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Dalpiaz, Brinkkemper - 2018 - Agile requirements engineering with user stories.pdf:pdf},
+isbn = {9781538674185},
+journal = {Proceedings - 2018 IEEE 26th International Requirements Engineering Conference, RE 2018},
+keywords = {Agile requirements engineering,Natural language processing,User stories},
+pages = {506--507},
+title = {{Agile requirements engineering with user stories}},
+year = {2018}
}
+@article{EvangelinGeetha2007,
+abstract = {Software performance is an important nonfunctional attribute of software systems for producing quality software. Performance issues must be considered throughout software project development. Predicting performance early in the life cycle is addressed by many methodologies, but the data collected during feasibility study not considered for predicting performance. In this paper, we consider the data collected (technical and environmental factors) during feasibility study of software project management to predict performance.
We derive an algorithm to predict the performance metrics and simulate the results using a case study on banking application. {\textcopyright}2007 IEEE.},
+author = {{Evangelin Geetha}, D. and {Suresh Kumar}, T. V. and {Rajani Kanth}, K.},
+doi = {10.1109/ICICS.2007.4449845},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Evangelin Geetha, Suresh Kumar, Rajani Kanth - 2007 - Predicting performance of software systems during feasibility study of software pr.pdf:pdf},
+isbn = {1424409837},
+journal = {2007 6th International Conference on Information, Communications and Signal Processing, ICICS},
+keywords = {Feasibility study,Software performance engineering,Use case point},
+pages = {1--5},
+title = {{Predicting performance of software systems during feasibility study of software project management}},
+year = {2007}
}
+@article{Wang2008,
+abstract = {Ajax is a new concept of web application development proposed in 2005. It is the acronym of Asynchronous JavaScript and XML. Once Ajax appeared, it is rapidly applied to the fields of web development. Ajax application is different from the traditional web development model, using asynchronous interaction. The client unnecessarily waits while the server processes the data submitted. So the use of Ajax can create web user interface which is direct, highly available, richer, more dynamic and closer to a local desktop application. This article introduces the main technology and superiority of Ajax firstly, and then practices web development using ASP.NET 2.0 + Ajax. In this paper, Ajax is applied to the website pass, which enables user to have better registration experience and enhances the user's enthusiasm. The registration functions are enhanced greatly as well. The experiments show that the Ajax web application development model is superior to the traditional web application development model significantly. {\textcopyright}2008 IEEE.},
+author = {Wang, H. and Yang, J.},
+doi = {10.1109/ICIEA.2008.4582637},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Wang, Yang - 2008 - Research and application of web development based on ASP.NET 2.0Ajax.pdf:pdf},
+isbn = {9781424417186},
+journal = {2008 3rd IEEE Conference on Industrial Electronics and Applications, ICIEA 2008},
+pages = {857--860},
+title = {{Research and application of web development based on ASP.NET 2.0+Ajax}},
+year = {2008}
}
+@inproceedings{Kumar2017,
+author = {Kumar, Manoj and Meenu},
+booktitle = {2017 International conference of Electronics, Communication and Aerospace Technology (ICECA)},
+doi = {10.1109/ICECA.2017.8212820},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Kumar - 2017 - Web Log Expert Tool.pdf:pdf},
+isbn = {978-1-5090-5685-9},
+keywords = {web server log,web usage mining},
+month = {apr},
+pages = {296--301},
+publisher = {IEEE},
+title = {{Analysis of visitor's behavior from web log using web log expert tool}},
+year = {2017}
}
-@article{Cinque2013,
-abstract = {Event logs have been widely used over the last three decades to analyze the failure behavior of a variety of systems. Nevertheless, the implementation of the logging mechanism lacks a systematic approach and collected logs are often inaccurate at reporting software failures: This is a threat to the validity of log-based failure analysis.
This paper analyzes the limitations of current logging mechanisms and proposes a rule-based approach to make logs effective to analyze software failures. The approach leverages artifacts produced at system design time and puts forth a set of rules to formalize the placement of the logging instructions within the source code. The validity of the approach, with respect to traditional logging mechanisms, is shown by means of around 12,500 software fault injection experiments into real-world systems. {\textcopyright} 2012 IEEE.},
-author = {Cinque, Marcello and Cotroneo, Domenico and Pecchia, Antonio},
-doi = {10.1109/TSE.2012.67},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Event{\_}Logs{\_}for{\_}the{\_}Analysis{\_}of{\_}Software{\_}Failures{\_}A{\_}Rule-Based{\_}Approach.pdf:pdf},
-issn = {00985589},
-journal = {IEEE Transactions on Software Engineering},
-keywords = {Event log,error detection,logging mechanism,rule-based logging,software failures},
-number = {6},
-pages = {806--821},
-publisher = {IEEE},
-title = {{Event logs for the analysis of software failures: A rule-based approach}},
-volume = {39},
-year = {2013}
}
-@article{Rong2018,
-abstract = {Background: Logging practice is a critical activity in software development, which aims to offer significant information to understand the runtime behavior of software systems and support better software maintenance. There have been many relevant studies dedicated to logging practice in software engineering recently, yet it lacks a systematic understanding to the adoption state of logging practice in industry and research progress in academia.
Objective: This study aims to synthesize relevant studies on the logging practice and portray a big picture of logging practice in software engineering so as to understand current adoption status and identify research opportunities. Method: We carried out a systematic review on the relevant studies on logging practice in software engineering. Results: Our study identified 41 primary studies relevant to logging practice. Typical findings are: (1) Logging practice attracts broad interests among researchers in many concrete research areas. (2) Logging practice occurred in many development types, among which the development of fault tolerance systems is the most adopted type. (3) Many challenges exist in current logging practice in software engineering, e.g., tradeoff between logging overhead and analysis cost, where and what to log, balance between enough logging and system performance, etc. Conclusion: Results show that logging practice plays a vital role in various applications for diverse purposes. However, there are many challenges and problems to be solved. Therefore, various novel techniques are necessary to guide developers conducting logging practice and improve the performance and efficiency of logging practice.},
-author = {Rong, Guoping and Zhang, Qiuping and Liu, Xinbei and Gu, Shenghiu},
-doi = {10.1109/APSEC.2017.61},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Rong et al. - 2017 - A Systematic Review of Logging Practice in Software Engineering.pdf:pdf},
-isbn = {9781538636817},
-issn = {15301362},
-journal = {Proceedings - Asia-Pacific Software Engineering Conference, APSEC},
-keywords = {Logging Practice,Software Engineering,Systematic Literature Review},
-pages = {534--539},
-title = {{A Systematic Review of Logging Practice in Software Engineering}},
-volume = {2017-Decem},
-year = {2018}
}
+@article{Khan2013,
+abstract = {Software Development Projects can vary considerably in difficulty, size and type. This has led to evolution and development of many associated project management methodologies and standard SDLC-Models. This paper acknowledges the risks associated with wrong selection of SDLC-models on business critical software projects and offers a pragmatic solution by proposing a handy selection matrix for choosing best-fit SDLC models on different types of Software Development Projects, covering both traditional and agile methodologies. This paper is the result of an study carried out to evaluate the methods {\&} practices of Project Life Cycle Model Selection actually used and practiced on the projects selected for this study (from businesses and IT-industry in India), with overall objective of proposing better methods and prescriptive guidance for decision making process for right selection of SDLC-Model on business critical software development projects. Right selection of SDLC-Methodology using a decision support tool can and will help successful completion of business critical software development projects and realization of business objectives for which the projects were undertaken. {\textcopyright} 2013 IEEE.},
+author = {Khan, P. M. and Beg, M. M.S.Sufyan},
+doi = {10.1109/ACCT.2013.12},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Extended{\_}Decision{\_}Support{\_}Matrix{\_}for{\_}Selection{\_}of{\_}SDLC-Models{\_}on{\_}Traditional{\_}and{\_}Agile{\_}Software{\_}Development{\_}Projects.pdf:pdf},
+isbn = {9780769549415},
+issn = {23270632},
+journal = {International Conference on Advanced Computing and Communication Technologies, ACCT},
+keywords = {SDLC,SDLC Selection,System Development Life Cycle},
+pages = {8--15},
+publisher = {IEEE},
+title = {{Extended decision support matrix for selection of sdlc-models on traditional and agile software development projects}},
+year = {2013}
}
+@article{Szendrei1990a,
+author = {Szendrei, Agnes},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Szendrei - 1990 - Simple surjective algebras having no proper subalgebras(2).pdf:pdf},
+isbn = {1446788700029},
+journal = {Journal of the Australian Mathematical Society},
+number = {3},
+pages = {434--454},
+title = {{Simple Surjective Algebras Having no Proper Subalgebras}},
+volume = {48},
+year = {1990}
}
+@inproceedings{Wiese2021,
+abstract = {Technical Debts (TD) are problems of the internal software quality. They are often contracted due to tight project deadlines, for example quick fixes and workarounds, and can make future changes more costly or impossible. TD prevention should be more important than TD repayment, because subsequent refactoring and reengineering is usually more expensive than building the right solution from the beginning. While there are numerous works on TD repayment, solutions for TD prevention are understudied. This paper evaluates a framework that focuses on both TD prevention and TD repayment. It was developed by and applied in an IT unit of a publishing house. The unique contribution of this framework is the integration of TD management into project management. The evaluation was carried out by a study based on ticket statistics and a structured survey with participants from the observed IT unit and a comparison unit. The evaluation shows that the adoption of this framework leads to a raised awareness for the contraction of TD.
This results in benefits like more rational discussions and decisions, TD prevention and timelier repayment of TD tickets.}, +archivePrefix = {arXiv}, +arxivId = {2103.10317}, +author = {Wiese, Marion and Riebisch, Matthias and Schwarze, Julian}, +booktitle = {2021 IEEE/ACM International Conference on Technical Debt (TechDebt)}, +doi = {10.1109/TechDebt52882.2021.00018}, +eprint = {2103.10317}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Preventing{\_}Technical{\_}Debt{\_}by{\_}Technical{\_}Debt{\_}Aware{\_}Project{\_}Management.pdf:pdf}, +isbn = {978-1-6654-1405-0}, +keywords = {Technical Debt,Technical Debt Aware Project Management,Technical Debt Awareness,Technical Debt Prevention,Technical Debt Repayment}, +month = {may}, +pages = {84--93}, +publisher = {IEEE}, +title = {{Preventing Technical Debt by Technical Debt Aware Project Management}}, +url = {http://arxiv.org/abs/2103.10317 https://ieeexplore.ieee.org/document/9462991/}, +year = {2021} } @article{Sneed2004, abstract = {The purpose of this essay is to present a costing model for software maintenance and evolution based on a separation of fixed and variable costs. There has always been a problem in distinguishing between the maintenance activities covered by the standard maintenance fee and those charged extra to the user. Separating these two types of costs is essential to every maintenance operation to prevent costs from getting out of control. In this paper the author proposes a solution, which can lead to better cost estimations and a financially more stable maintenance operation. Particular emphasis is placed on a sharp division between work done to maintain the system functionality as it is and work done to enhance that functionality. {\textcopyright} 2004 IEEE.}, @@ -766,46 +717,64 @@ @article{Sneed2004 title = {{A cost model for software maintenance {\&} evolution}}, year = {2004} } -@article{Lenarduzzi2017, -abstract = {Software maintenance has dramatically evolved in the last four decades, to cope with the continuously changing software development models, and programming languages and adopting increasingly advanced prediction models. In this work, we present the initial results of a Systematic Literature Review (SLR), highlighting the evolution of the metrics and models adopted in the last forty years.}, -author = {Lenarduzzi, Valentina and Sillitti, Alberto and Taibi, Davide}, -doi = {10.1109/ICSE-C.2017.122}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Analyzing{\_}Forty{\_}Years{\_}of{\_}Software{\_}Maintenance{\_}Models.pdf:pdf}, -isbn = {9781538615898}, -journal = {Proceedings - 2017 IEEE/ACM 39th International Conference on Software Engineering Companion, ICSE-C 2017}, -keywords = {Software Maintenance,Systematic Literature Review}, -pages = {146--148}, -publisher = {IEEE}, -title = {{Analyzing Forty years of software maintenance models}}, -year = {2017} +@techreport{Kitchenham2007, +abstract = {The objective of this report is to propose comprehensive guidelines for systematic literature reviews appropriate for software engineering researchers, including PhD students. A systematic literature review is a means of evaluating and interpreting all available research relevant to a particular research question, topic area, or phenomenon of interest. Systematic reviews aim to present a fair evaluation of a research topic by using a trustworthy, rigorous, and auditable methodology. 
The guidelines presented in this report were derived from three existing guidelines used by medical researchers, two books produced by researchers with social science backgrounds and discussions with researchers from other disciplines who are involved in evidence-based practice. The guidelines have been adapted to reflect the specific problems of software engineering research. The guidelines cover three phases of a systematic literature review: planning the review, conducting the review and reporting the review. They provide a relatively high level description. They do not consider the impact of the research questions on the review procedures, nor do they specify in detail the mechanisms needed to perform meta-analysis.}, +author = {Kitchenham, B.A. and Charters, S}, +booktitle = {Technical Report EBSE 2007- 001. Keele University and Durham University Joint Report}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Kitchenham-2007Systematicreviews5-8.pdf:pdf}, +institution = {Keele University}, +number = {January}, +title = {{Guidelines for performing Systematic Literature Reviews in Software Engineering (Software Engineering Group, Department of Computer Science, Keele {\ldots}}}, +year = {2007} } -@article{Cui2003, -abstract = {Queries to search engines on the Web are usually short. They do not provide sufficient information for an effective selection of relevant documents. Previous research has proposed the utilization of query expansion to deal with this problem. However, expansion terms are usually determined on term co-occurrences within documents. In this study, we propose a new method for query expansion based on user interactions recorded in user logs. The central idea is to extract correlations between query terms and document terms by analyzing user logs. These correlations are then used to select high-quality expansion terms for new queries. Compared to previous query expansion methods, ours takes advantage of the user judgments implied in user logs. The experimental results show that the log-based query expansion method can produce much better results than both the classical search method and the other query expansion methods.}, -author = {Cui, Hang and Wen, Ji Rong and Nie, Jian Yun and Ma, Wei Ying}, -doi = {10.1109/TKDE.2003.1209002}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Hang Cui et al. 
- 2003 - Query expansion by mining user logs.pdf:pdf}, -issn = {10414347}, -journal = {IEEE Transactions on Knowledge and Data Engineering}, -keywords = {Information retrieval,Probabilistic model,Query expansion,Search engine,User log}, -number = {4}, -pages = {829--839}, -title = {{Query expansion by mining user logs}}, -volume = {15}, -year = {2003} +@article{Sosnowski2011, +author = {Sosnowski, Janusz and Gawkowski, Piotr and Cabaj, Krzysztof}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Sosnowski, Gawkowski, Cabaj - 2014 - Event and performance logs in system management and evaluation.pdf:pdf}, +journal = {Information Systems in Management XIV, Security and Effectiveness of ICT Systems}, +number = {January 2011}, +title = {{Event and Performance Logs in System Management and Evaluation}}, +year = {2011} } -@inproceedings{Thamburaj2017, -author = {Thamburaj, T Francis and Aloysius, A.}, -booktitle = {2017 World Congress on Computing and Communication Technologies (WCCCT)}, -doi = {10.1109/WCCCT.2016.54}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Thamburaj, Aloysius - 2017 - Models for Maintenance Effort Prediction with Object-Oriented Cognitive Complexity Metrics.pdf:pdf}, -isbn = {978-1-5090-5573-9}, -keywords = {cognitive complexity,maintenance effort prediction,metrics,object-oriented metrics,software maintenance}, -month = {feb}, -pages = {191--194}, +@article{Fedaghi2010, +abstract = {Information security audit is a monitoring/logging mechanism to ensure compliance with regulations and to detect abnormalities, security breaches, and privacy violations; however, auditing too many events causes overwhelming use of system resources and impacts performance. Consequently, a classification of events is used to prioritize events and configure the log system. Rules can be applied according to this classification to make decisions about events to be archived and types of actions invoked by events. Current classification methodologies are fixed to specific types of incident occurrences and applied in terms of system-dependent description. In this paper, we propose a conceptual model that produces an implementation-independent logging scheme to monitor events.}, +author = {Al-Fedaghi, Sabah and Mahdi, Fahad}, +doi = {10.5121/ijnsa.2010.2205}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Al-Fedaghi, Mahdi - 2010 - Events Classification in Log Audit.pdf:pdf}, +issn = {09752307}, +journal = {International journal of Network Security {\&} Its Applications}, +number = {2}, +pages = {58--73}, +title = {{Events Classification in Log Audit}}, +volume = {2}, +year = {2010} +} +@inproceedings{Stojanov2017, +abstract = {Software maintenance has been recognized by academicians and practitioners from industry as the most challenging and expensive part in software life cycle. The complexity and high costs of maintenance activities require systematic evidence of all maintenance activities and accurate models for planning and managing them. A common way for analyzing practice in software engineering is based on trend analysis of historical data related to activities and tasks implemented in the past. This paper presents a case study conducted in a micro software company aimed at introducing a schema for classifying maintenance tasks, and identifying trends in software maintenance tasks distribution among the programmers in the company. 
The discussion of results includes benefits for the company, limitations of the research and implications for academicians and practitioners in industry. The paper concludes with a few promising further research directions.}, +author = {Stojanov, Zeljko and Stojanov, Jelena and Dobrilovic, Dalibor and Petrov, Nikola}, +booktitle = {2017 IEEE 15th International Symposium on Intelligent Systems and Informatics (SISY)}, +doi = {10.1109/SISY.2017.8080547}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Stojanov et al. - 2017 - Trends in software maintenance tasks distribution among programmers A study in a micro software company.pdf:pdf}, +isbn = {978-1-5386-3855-2}, +month = {sep}, +pages = {000023--000028}, publisher = {IEEE}, -title = {{Models for Maintenance Effort Prediction with Object-Oriented Cognitive Complexity Metrics}}, +title = {{Trends in software maintenance tasks distribution among programmers: A study in a micro software company}}, year = {2017} } +@article{VanDerAalst2004, +abstract = {Contemporary workflow management systems are driven by explicit process models, i.e., a completely specified workflow design is required in order to enact a given workflow process. Creating a workflow design is a complicated time-consuming process and, typically, there are discrepancies between the actual workflow processes and the processes as perceived by the management. Therefore, we have developed techniques for discovering workflow models. The starting point for such techniques is a so-called "workflow log" containing information about the workflow process as it is actually being executed. We present a new algorithm to extract a process model from such a log and represent it in terms of a Petri net. However, we will also demonstrate that it is not possible to discover arbitrary workflow processes. In this paper, we explore a class of workflow processes that can be discovered. We show that the $\alpha$-algorithm can successfully mine any workflow represented by a so-called SWF-net.}, +author = {{Van Der Aalst}, Wil and Weijters, Ton and Maruster, Laura}, +doi = {10.1109/TKDE.2004.47}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Workflow{\_}mining{\_}discovering{\_}process{\_}models{\_}from{\_}event{\_}logs.pdf:pdf}, +issn = {10414347}, +journal = {IEEE Transactions on Knowledge and Data Engineering}, +keywords = {Data mining,Petri nets,Workflow management,Workflow mining}, +number = {9}, +pages = {1128--1142}, +title = {{Workflow mining: Discovering process models from event logs}}, +volume = {16}, +year = {2004} +} @article{Antolic2008, abstract = {This paper gives an overview of possible Key Performance Indicators (KPI) that can be used for software process efficiency evaluation. The overview is based on currently used KPIs in software development projects on CPP platform. The most important KPIs are analyzed, and their usage in the process efficiency evaluation is discussed. The outcome of the measurement is used to initiate further process adjustments and improvements. In addition, there is possibility to perform benchmarking between different development projects, and based on collected data easier search for best practices in the projects that can be broadly implemented. Some proposals and future directions in the area of process measurement are given. 
{\textcopyright} 2008 by Mipro.}, author = {Antoli{\'{c}}, {\v{Z}}}, @@ -817,115 +786,40 @@ @article{Antolic2008 volume = {2}, year = {2008} } -@article{Jain2018, -abstract = {Developing a quality software product is an essential need for the software industry. Focusing on product's quality allows software users to adapt the product more easily and efficiently. Quality plays a vital role for the software users. It is a confirmation of all the requirements according to customer satisfaction. So, it's important to define a proper software development process that leads to a quality software product. Agile being one of the quickest methodologies for software development, allows the quality product to be delivered to the customer side. The objective of this paper is to discuss the impact of Agile Software Development Process (ASDP) on quality of software product by defining the mapping between agile software development process and various quality attributes. The paper presents an overall importance of software development process for a quality product.}, -author = {Jain, Parita and Sharma, Arun and Ahuja, Laxmi}, -doi = {10.1109/ICRITO.2018.8748529}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/The{\_}Impact{\_}of{\_}Agile{\_}Software{\_}Development{\_}Process{\_}on{\_}the{\_}Quality{\_}of{\_}Software{\_}Product.pdf:pdf}, -isbn = {9781538646922}, -journal = {2018 7th International Conference on Reliability, Infocom Technologies and Optimization: Trends and Future Directions, ICRITO 2018}, -keywords = {Agile Software Development Process,Software Design,Software Implementation,Software Maintainability,Software Quality,Software Requirement Analysis,Software Testing}, -pages = {812--815}, -publisher = {IEEE}, -title = {{The Impact of Agile Software Development Process on the Quality of Software Product}}, -year = {2018} -} -@article{Slaninova2014, -abstract = {This paper is focused on log files where one log file attribute is an originator of the recorded activity (originator is a person in our case). Hence, based on the similar attributes of people, we are able to construct models which explain certain aspects of a persons behaviour. Moreover, we can extract user profiles based on behaviour and find latent ties between users and between different user groups with similar behaviours. We accomplish this by our new approach using the methods from log mining, business process analysis, complex networks and graph theory. The paper describes the whole process of the approach from the log file to the user graph. The main focus is on the step called 'The finding of user behavioural patterns'.}, -author = {Slaninov{\'{a}}, Kateřina}, -doi = {10.1109/ISDA.2013.6920751}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Slaninov{\'{a}} - 2014 - User behavioural patterns and reduced user profiles extracted from log files.pdf:pdf}, -isbn = {9781479935161}, -issn = {21647151}, -journal = {International Conference on Intelligent Systems Design and Applications, ISDA}, -keywords = {analysis of users' behaviour,behavioural patterns,complex networks,user profiles}, -pages = {289--294}, -publisher = {IEEE}, -title = {{User behavioural patterns and reduced user profiles extracted from log files}}, -year = {2014} -} -@article{IEEEStandard2011, -author = {{IEEE Standard}}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/ISOIECIEEE{\textcopyright} Std. 
42012011 - 2011 - INTERNATIONAL STANDARD ISO IEC IEEE Systems and software engineering — agile environment.pdf:pdf}, -journal = {Ieee Standards}, -title = {{INTERNATIONAL STANDARD ISO / IEC / IEEE Systems and software engineering - Architecture description}}, -volume = {2011}, -year = {2011} -} -@article{Khan2013, -abstract = {Software Development Projects can vary considerably in difficulty, size and type. This has led to evolution and development of many associated project management methodologies and standard SDLC-Models. This paper acknowledges the risks associated with wrong selection of SDLC-models on business critical software projects and offers a pragmatic solution by proposing a handy selection matrix for choosing best-fit SDLC models on different types of Software Development Projects, covering both traditional and agile methodologies. This paper is the result of an study carried out to evaluate the methods {\&} practices of Project Life Cycle Model Selection actually used and practiced on the projects selected for this study (from businesses and IT-industry in India), with overall objective of proposing better methods and prescriptive guidance for decision making process for right selection of SDLC-Model on business critical software development projects. Right selection of SDLC-Methodology using a decision support tool can and will help successful completion of business critical software development projects and realization of business objectives for which the projects were undertaken. {\textcopyright} 2013 IEEE.}, -author = {Khan, P. M. and Beg, M. M.S.Sufyan}, -doi = {10.1109/ACCT.2013.12}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Extended{\_}Decision{\_}Support{\_}Matrix{\_}for{\_}Selection{\_}of{\_}SDLC-Models{\_}on{\_}Traditional{\_}and{\_}Agile{\_}Software{\_}Development{\_}Projects.pdf:pdf}, -isbn = {9780769549415}, -issn = {23270632}, -journal = {International Conference on Advanced Computing and Communication Technologies, ACCT}, -keywords = {SDLC,SDLC Selection,System Development Life Cycle}, -pages = {8--15}, -publisher = {IEEE}, -title = {{Extended decision support matrix for selection of sdlc-models on traditional and agile software development projects}}, +@article{Dwyer2013, +abstract = {Security is one of the biggest concerns of any company that has an IT infrastructure. Windows event logs are a very useful source of data for security information, but sometimes can be nearly impossible to use due to the complexity of log data or the number of events generated per minute. For this reason, event log data must be automatically processed so that an administrator is given a list of events that actually need the administrator's attention. This has been standard in intrusion detection systems for many years to find anomalies in network traffic, but has not been common in event log processing. This paper will adapt these intrusion detection techniques for Windows event log data sets to find anomalies in these log data sets. ?? 
2013 ICST.}, +author = {Dwyer, John and Truta, Traian Marius}, +doi = {10.4108/icst.collaboratecom.2013.254136}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Dwyer, Truta - 2013 - Finding anomalies in windows event logs using standard deviation.pdf:pdf}, +isbn = {9781936968923}, +journal = {Proceedings of the 9th IEEE International Conference on Collaborative Computing: Networking, Applications and Worksharing, COLLABORATECOM 2013}, +keywords = {Anomaly Detection,Standard Deviation,Windows Event Logs}, +pages = {563--570}, +title = {{Finding anomalies in windows event logs using standard deviation}}, year = {2013} } -@article{Szendrei1990, -abstract = {We prove that every finite, simple, surjective algebra having no proper subalgebras is either quasiprimal or affine or isomorphic to an algebra term equivalent to a matrix power of a unary permutational algebra. Consequently, it generates a minimal variety if and only if it is quasiprimal. We show also that a locally finite, minimal variety omitting type 1 is minimal as a quasivariety if and only if it has a unique subdirectly irreducible algebra. {\textcopyright} 1990, Australian Mathematical Society. All rights reserved.}, -author = {Szendrei, {\'{A}}gnes}, -doi = {10.1017/S1446788700029979}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Simple{\_}Surjective{\_}Algebras{\_}Having{\_}no{\_}Proper{\_}Subalg.pdf:pdf}, -isbn = {1446788700029}, -issn = {14468107}, -journal = {Journal of the Australian Mathematical Society}, -number = {3}, -pages = {434--454}, -title = {{Simple Surjective Algebras Having no Proper Subalgebras}}, -volume = {48}, -year = {1990} -} -@article{Enoiu2020, -abstract = {Software testing is a complex, intellectual activity based (at least) on analysis, reasoning, decision making, abstraction and collaboration performed in a highly demanding environment. Naturally, it uses and allocates multiple cognitive resources in software testers. However, while a cognitive psychology perspective is increasingly used in the general software engineering literature, it has yet to find its place in software testing. To the best of our knowledge, no theory of software testers' cognitive processes exists. Here, we take the first step towards such a theory by presenting a cognitive model of software testing based on how problem solving is conceptualized in cognitive psychology. Our approach is to instantiate a general problem solving process for the specific problem of creating test cases. We then propose an experiment for testing our cognitive test design model. The experiment makes use of verbal protocol analysis to understand the mechanisms by which human testers choose, design, implement and evaluate test cases. An initial evaluation was then performed with five software engineering master students as subjects. 
The results support a problem solving-based model of test design for capturing testers' cognitive processes.},
-archivePrefix = {arXiv},
-arxivId = {2007.08927},
-author = {Enoiu, Eduard and Tukseferi, Gerald and Feldt, Robert},
-doi = {10.1109/QRS-C51114.2020.00053},
-eprint = {2007.08927},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Towards{\_}a{\_}Model{\_}of{\_}Testers{\_}Cognitive{\_}Processes{\_}Software{\_}Testing{\_}as{\_}a{\_}Problem{\_}Solving{\_}Approach.pdf:pdf},
-isbn = {9781728189154},
-journal = {Proceedings - Companion of the 2020 IEEE 20th International Conference on Software Quality, Reliability, and Security, QRS-C 2020},
-keywords = {behavioral software testing,cognitive model,cognitive processes,cognitive psychology,exploratory testing,human aspects,human based software testing,manual test design,problem solving,software testing,test creation,test design,verbal protocol,verbal protocol analysis},
-pages = {272--279},
-title = {{Towards a Model of Testers' Cognitive Processes: Software Testing as a Problem Solving Approach}},
-year = {2020}
-}
-@article{Ganapathi,
-author = {Ganapathi, Archana and Stanford, Berkeley},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Ganapathi, Stanford - Unknown - Failure Analysis of Internet Services.pdf:pdf},
-pages = {1--17},
-title = {{Failure Analysis of Internet Services}}
-}
-@article{Ping2010,
-abstract = {Software maintainability is one important aspect in the evaluation of software evolution of a software product. Due to the complexity of tracking maintenance behaviors, it is difficult to accurately predict the cost and risk of maintenance after delivery of software products. In an attempt to address this issue quantitatively, software maintainability is viewed as an inevitable evolution process driven by maintenance behaviors, given a health index at the time when a software product are delivered. A Hidden Markov Model (HMM) is used to simulate the maintenance behaviors shown as their possible occurrence probabilities. And software metrics is the measurement of the quality of a software product and its measurement results of a product being delivered are combined to form the health index of the product. The health index works as a weight on the process of maintenance behavior over time. When the occurrence probabilities of maintenance behaviors reach certain number which is reckoned as the indication of the deterioration status of a software product, the product can be regarded as being obsolete. Longer the time, better the maintainability would be. {\textcopyright} 2010 IEEE.},
-author = {Ping, Liang},
-doi = {10.1109/IFITA.2010.294},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Ping - 2010 - A quantitative approach to software maintainability prediction.pdf:pdf},
-isbn = {9780769541150},
-journal = {Proceedings - 2010 International Forum on Information Technology and Applications, IFITA 2010},
-keywords = {Hidden markov model,Software maintainability,Software metrics},
-pages = {105--108},
+@article{Kocsis2012,
+abstract = {Web analytics is a process through which website usage statistics and user behavior data are gathered. Such an analytics program can be used as a tool to get anonymous information about users, for example who they are, from which site they are coming, how often they are visiting the website, how much time they spend on a site, etc. In this paper we focus on the performance of large websites from the perspective of web analytics and the required extra storage space.
{\textcopyright} 2012 IEEE.}, +author = {Kocsis, Gergely and Ekler, Peter}, +doi = {10.1109/CINTI.2012.6496762}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Analyzing{\_}the{\_}resource{\_}requirements{\_}of{\_}usage{\_}statistics{\_}gathering{\_}on{\_}online{\_}newspapers.pdf:pdf}, +isbn = {9781467352062}, +journal = {CINTI 2012 - 13th IEEE International Symposium on Computational Intelligence and Informatics, Proceedings}, +keywords = {Web analysis,Web search engines,Web statistics,high score lists,system and database design}, +pages = {213--218}, publisher = {IEEE}, -title = {{A quantitative approach to software maintainability prediction}}, -volume = {1}, -year = {2010} +title = {{Analyzing the resource requirements of usage statistics gathering on online newspapers}}, +year = {2012} } -@article{Dalpiaz2018, -abstract = {90{\%} of agile practitioners employ user stories for capturing requirements. Of these, 70{\%} follow a simple template when creating user stories: As a {\textless}role{\textgreater} I want to {\textless}action{\textgreater}, [so that {\textless}benefit{\textgreater}]. User stories' popularity among practitioners and their simple yet strict structure make them ideal candidates for automatic reasoning based on natural language processing. In our research, we have found that circa 50{\%} of real-world user stories contain easily preventable errors that may endanger their potential. To alleviate this problem, we have created methods, theories and tools that support creating better user stories. This tutorial combines our previous work into a pipeline for working with user stories: (1) The basics of creating user stories, and their use in requirements engineering; (2) How to improve user story quality with the Quality User Story Framework and AQUSA tool; and (3) How to generate conceptual models from user stories using the Visual Narrator and the Interactive Narrator tools. Our toolset is demonstrated with results obtained from 20+ software companies employing user stories.}, -author = {Dalpiaz, Fabiano and Brinkkemper, Sjaak}, -doi = {10.1109/RE.2018.00075}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Dalpiaz, Brinkkemper - 2018 - Agile requirements engineering with user stories.pdf:pdf;:C$\backslash$:/Users/cscheepers/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Dalpiaz, Brinkkemper - 2018 - Agile requirements engineering with user stories.pdf:pdf}, -isbn = {9781538674185}, -journal = {Proceedings - 2018 IEEE 26th International Requirements Engineering Conference, RE 2018}, -keywords = {Agile requirements engineering,Natural language processing,User stories}, -pages = {506--507}, -title = {{Agile requirements engineering with user stories}}, -year = {2018} +@article{Jans2012, +abstract = {In this paper we discuss the value that process mining of event logs can provide to internal and external auditors. Process mining aims to extract knowledge from event logs recorded by an information system. What makes an event log such a unique and potentially invaluable resource for auditing is not only that it provides the auditor with more data to analyze, but also because that additional data is recorded automatically and independently of the person whose behavior is the subject of the audit. In other words, an event log helps achieve the classic audit principle of “four eyes”, or in modern parlance, act as the equivalent of a surveillance camera, peering over the auditee's shoulder. 
Until recently, the information contained in event logs was rarely used by auditors. In this paper we consider how process mining can add value to auditing, perhaps even to fundamentally transform it.},
+author = {Jans, Mieke Julie and Alles, Michael Gamini and Vasarhelyi, Miklos A.},
+doi = {10.2139/ssrn.1578912},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Jans, Alles, Vasarhelyi - 2010 - Process Mining of Event Logs in Auditing Opportunities and Challenges.pdf:pdf},
+journal = {SSRN Electronic Journal},
+number = {August 2020},
+title = {{Process Mining of Event Logs in Auditing: Opportunities and Challenges}},
+year = {2012}
}
@article{Lu2019,
abstract = {A decision problem, according to traditional principles, is approached by finding an optimal solution to an analytical programming decision model, which is known as model-driven decision-making. The fidelity of the model determines the quality and reliability of the decision-making; however, the intrinsic complexity of many real-world decision problems leads to significant model mismatch or infeasibility in deriving a model using the first principle. To overcome the challenges that are present in the big data era, both researchers and practitioners emphasize the importance of making decisions that are backed up by data related to decision tasks, a process called data-driven decision-making (D3M). By building on data science, not only can decision models be predicted in the presence of uncertainty or unknown dynamics, but also inherent rules or knowledge can be extracted from data and directly utilized to generate decision solutions. This position paper systematically discusses the basic concepts and prevailing techniques in data-driven decision-making and clusters related developments in technique into two main categories: programmable data-driven decision-making (P-D3M) and nonprogrammable data-driven decision-making (NP-D3M). This paper establishes a D3M technical framework, main methodologies, and approaches for both categories of D3M, as well as identifies potential methods and procedures for using data to support decision-making. It also provides examples of how D3M is implemented in practice and identifies five further research directions in the D3M area. We believe that this paper will directly support researchers and professionals in their understanding of the fundamentals of D3M and of the developments in technical methods.},
@@ -942,71 +836,58 @@ @article{Lu2019
volume = {3},
year = {2019}
}
-@article{Waqar2017,
-abstract = {Tracking users' posting activities in online classified ads and understanding the dynamics of their behavior is a topic of great importance with many implications. However, some of the underlying problems associated with modeling users and detecting their behavioral changes due to temporal and spatial variations have not been well-studied. In this paper, we develop a probabilistic model of user behavior based on the ads the user posts and the categories in which the ads are posted. The model can track some of the temporal changes in behavior, as revealed by our experiments on two classes of users monitored over a period of almost a year. We study the association between post categories and user groups, and show how temporal and seasonal changes can be detected.
We further investigate a generative model for ad posts, based on user locations, and provide some evidence showing that the model is promising and that some interesting relationships can be identified.}, -author = {Waqar, Muhammad and Rafiei, Davood}, -doi = {10.1109/WI.2016.0088}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Waqar, Rafiei - 2017 - Tracking User Activities and Marketplace Dynamics in Classified Ads.pdf:pdf}, -isbn = {9781509044702}, -journal = {Proceedings - 2016 IEEE/WIC/ACM International Conference on Web Intelligence, WI 2016}, -keywords = {Classified ads,Temporal analysis,User modeling,User tracking}, -pages = {522--525}, -publisher = {IEEE}, -title = {{Tracking User Activities and Marketplace Dynamics in Classified Ads}}, -year = {2017} -} -@article{Jia2018, -abstract = {When systems fail, log data is often the most important information source for fault diagnosis. However, the performance of automatic fault diagnosis is limited by the ad-hoc nature of logs. The key problem is that existing developer-written logs are designed for humans rather than machines to automatically detect system anomalies. To improve the quality of logs for fault diagnosis, we propose a novel log enhancement approach which automatically identifies logging points that reflect anomalous behavior during system fault. We evaluate our approach on three popular software systems AcmeAir, HDFS and TensorFlow. Results show that it can significantly improve fault diagnosis accuracy by 50{\%} on average compared to the developers' manually placed logging points.}, -author = {Jia, Tong and Li, Ying and Zhang, Chengbo and Xia, Wensheng and Jiang, Jie and Liu, Yuhong}, -doi = {10.1109/ISSREW.2018.00-22}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Jia et al. - 2018 - Machine Deserves Better Logging A Log Enhancement Approach for Automatic Fault Diagnosis(2).pdf:pdf}, -isbn = {9781538694435}, -journal = {Proceedings - 29th IEEE International Symposium on Software Reliability Engineering Workshops, ISSREW 2018}, -keywords = {Automatic fault diagnosis,Log enhancement,logging points}, -pages = {106--111}, -publisher = {IEEE}, -title = {{Machine Deserves Better Logging: A Log Enhancement Approach for Automatic Fault Diagnosis}}, +@article{Hasiloglu2018, +abstract = {Personal data have been compiled and harnessed by a great number of establishments to execute their legal activities. Establishments are legally bound to maintain the confidentiality and security of personal data. Hence it is a requirement to provide access logs for the personal information. Depending on the needs and capacity, personal data can be opened to the users via platforms such as file system, database and web service. Web service platform is a popular alternative since it is autonomous and can isolate the data source from the user. In this paper, the way to log personal data accessed via web service method has been discussed. As an alternative to classical method in which logs were recorded and saved by client applications, a different mechanism of forming a central audit log with API manager has been investigated. By forging a model policy to exemplify central logging method, its advantages and disadvantages have been explored. 
It has been concluded in the end that this model could be employed in centrally recording audit logs.}, +author = {Hasiloglu, Abdulsamet and Bali, Abdulkadir}, +doi = {10.1109/ISDFS.2018.8355333}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Hasiloglu, Bali - 2018 - Central audit logging mechanism in personal data web services.pdf:pdf}, +isbn = {9781538634493}, +journal = {6th International Symposium on Digital Forensic and Security, ISDFS 2018 - Proceeding}, +keywords = {API,API Policy,Audit Logging,Personal Data,Web Service}, +pages = {1--3}, +title = {{Central audit logging mechanism in personal data web services}}, +volume = {2018-Janua}, year = {2018} } -@article{Kocsis2012, -abstract = {Web analytics is a process through which website usage statistics and user behavior data are gathered. Such an analytics program can be used as a tool to get anonym information about users, for example who they are, from which site are they are coming, how often they are visiting the website, how much time do they spent on a site, etc. In this paper we focus on the performance of large websites from the perspective of web analytics and the required extra storage space. {\textcopyright} 2012 IEEE.}, -author = {Kocsis, Gergely and Ekler, Peter}, -doi = {10.1109/CINTI.2012.6496762}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Analyzing{\_}the{\_}resource{\_}requirements{\_}of{\_}usage{\_}statistics{\_}gathering{\_}on{\_}online{\_}newspapers.pdf:pdf}, -isbn = {9781467352062}, -journal = {CINTI 2012 - 13th IEEE International Symposium on Computational Intelligence and Informatics, Proceedings}, -keywords = {Web analysis,Web search engines,Web statistics,high score lists,system and database design}, -pages = {213--218}, -publisher = {IEEE}, -title = {{Analyzing the resource requirements of usage statistics gathering on online newspapers}}, -year = {2012} -} -@article{Zhu2015, -abstract = {Logging is a common programming practice of practical importance to collect system runtime information for postmortem analysis. Strategic logging placement is desired to cover necessary runtime information without incurring unintended consequences (e.g., performance overhead, trivial logs). However, in current practice, there is a lack of rigorous specifications for developers to govern their logging behaviours. Logging has become an important yet tough decision which mostly depends on the domain knowledge of developers. To reduce the effort on making logging decisions, in this paper, we propose a "learning to log" framework, which aims to provide informative guidance on logging during development. As a proof of concept, we provide the design and implementation of a logging suggestion tool, LogAdvisor, which automatically learns the common logging practices on where to log from existing logging instances and further leverages them for actionable suggestions to developers. Specifically, we identify the important factors for determining where to log and extract them as structural features, textual features, and syntactic features. Then, by applying machine learning techniques (e.g., feature selection and classifier learning) and noise handling techniques, we achieve high accuracy of logging suggestions. We evaluate LogAdvisor on two industrial software systems from Microsoft and two open-source software systems from GitHub (totally 19.1M LOC and 100.6K logging statements). 
The encouraging experimental results, as well as a user study, demonstrate the feasibility and effectiveness of our logging suggestion tool. We believe our work can serve as an important first step towards the goal of "learning to log".}, -author = {Zhu, Jieming and He, Pinjia and Fu, Qiang and Zhang, Hongyu and Lyu, Michael R. and Zhang, Dongmei}, -doi = {10.1109/ICSE.2015.60}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Zhu et al. - 2015 - Learning to log Helping developers make informed logging decisions.pdf:pdf}, -isbn = {9781479919345}, -issn = {02705257}, -journal = {Proceedings - International Conference on Software Engineering}, -pages = {415--425}, +@article{Snipes2018, +abstract = {In large-scale software systems, the majority of defective files are architecturally connected, and the architecture connections usually exhibit design flaws, which are associated with higher change-proneness among files and higher maintenance costs. As software evolves with bug fixes, new features, or improvements, unresolved architecture design flaws can contribute to maintenance difficulties. The impact on effort due to architecture design flaws has been difficult to quantify and justify. In this paper, we conducted a case study where we identified flawed architecture relations and quantified their effects on maintenance activities. Using data from this project's source code and revision history, we identified file groups where files are architecturally connected and participated in flawed architecture designs, quantified the maintenance activities in the detected files, and assessed the penalty related to these files.}, +author = {Snipes, Will and Karlekar, Sunil L. and Mo, Ran}, +doi = {10.1109/SEAA.2018.00071}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Snipes, Karlekar, Mo - 2018 - A case study of the effects of architecture debt on software evolution effort.pdf:pdf}, +isbn = {9781538673829}, +journal = {Proceedings - 44th Euromicro Conference on Software Engineering and Advanced Applications, SEAA 2018}, +keywords = {Software architecture,Software maintenance,Technical debt}, +pages = {400--403}, publisher = {IEEE}, -title = {{Learning to log: Helping developers make informed logging decisions}}, -volume = {1}, -year = {2015} +title = {{A case study of the effects of architecture debt on software evolution effort}}, +year = {2018} } -@article{Paliouras1999, -abstract = {The World Wide Web has become a major source of information that can be turned into valuable knowledge for individuals and organisations. In the work presented here, we are concerned with the extraction of meta-knowledge from the Web. In particular, knowledge about Web usage which is invaluable to the construction of Web sites that meet their purposes and prevent disorientation. Towards this goal, we propose the organisation of the users of a Web site into groups with common navigational behaviour (user communities). We view the task of building user communities as a data mining task, searching for interesting patterns within a database. The database that we use in our experiments consists of access logs collected from the Web site of the Advanced Course on Artificial Intelligence 1999. The unsupervised machine learning algorithm COBWEB is used to organise the users of the site, who follow similar paths, into a small set of communities. Particular attention is paid to the interpretation of the communities that are generated through this process. 
For this purpose, we use a simple metric to identify the representative navigational behaviour for each community. This information can then be used by the administrators of the site to re-organise it in a way that is tailored to the needs of each community. The proposed Web usage analysis is much more insightful than the common approach of examining simple usage statistics of the Web site.}, -author = {Paliouras, Georgios and Papatheodorou, Christos and Karkaletsis, Vangelis and Spyropoulos, Costantine and Tzitziras, Panayiotis}, -doi = {10.1109/icsmc.1999.825226}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/From{\_}Web{\_}usage{\_}statistics{\_}to{\_}Web{\_}usage{\_}analysis.pdf:pdf}, -isbn = {0780357310}, -issn = {08843627}, -journal = {Proceedings of the IEEE International Conference on Systems, Man and Cybernetics}, -pages = {159--164}, -title = {{From Web usage statistics to Web usage analysis}}, -volume = {2}, -year = {1999} +@article{Gurumdimma2016, +abstract = {The use of console logs for error detection in large scale distributed systems has proven to be useful to system administrators. However, such logs are typically redundant and incomplete, making accurate detection very difficult. In an attempt to increase this accuracy, we complement these incomplete console logs with resource usage data, which captures the resource utilisation of every job in the system. We then develop a novel error detection methodology, the CRUDE approach, that makes use of both the resource usage data and console logs. We thus make the following specific technical contributions: we develop (i) a clustering algorithm to group nodes with similar behaviour, (ii) an anomaly detection algorithm to identify jobs with anomalous resource usage, (iii) an algorithm that links jobs with anomalous resource usage with erroneous nodes. We then evaluate our approach using console logs and resource usage data from the Ranger Supercomputer. Our results are positive: (i) our approach detects errors with a true positive rate of about 80{\%}, and (ii) when compared with the well-known Nodeinfo error detection algorithm, our algorithm provides an average improvement of around 85{\%} over Nodeinfo, with a best-case improvement of 250{\%}.}, +author = {Gurumdimma, Nentawe and Jhumka, Arshad and Liakata, Maria and Chuah, Edward and Browne, James}, +doi = {10.1109/SRDS.2016.017}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Gurumdimma et al. - 2016 - CRUDE Combining Resource Usage Data and Error Logs for Accurate Error Detection in Large-Scale Distributed Sy.pdf:pdf}, +isbn = {9781509035137}, +issn = {10609857}, +journal = {Proceedings of the IEEE Symposium on Reliable Distributed Systems}, +keywords = {anomaly detection,detection,event logs,faults,large-scale HPC systems,resource usage data,unsupervised}, +pages = {51--60}, +publisher = {IEEE}, +title = {{CRUDE: Combining Resource Usage Data and Error Logs for Accurate Error Detection in Large-Scale Distributed Systems}}, +year = {2016} +} +@article{Bozhikova2017, +abstract = {The term "quality software" refers to software that is easy to maintain and evolve. The presence of Anti-Patterns and Patterns is recognized as one of the effective ways to measure the quality of modern software systems. The paper presents an approach which supports the software analysis, development and maintenance, using techniques that generate the structure of Software Design Patterns, find Anti-Patterns in the code and perform Code Refactoring. 
The proposed approach is implemented in a software tool, which could support the real phases of software development and could be used for educational purposes, to support the "Advanced Software Engineering" course.},
+author = {Bozhikova, Violeta and Stoeva, Mariana and Georgiev, Bozhidar and Nikolaeva, Dimitrichka},
+doi = {10.1109/ET.2017.8124337},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Bozhikova et al. - 2017 - Improving the software quality — an educational approach(2).pdf:pdf},
+isbn = {9781538617533},
+journal = {2017 26th International Scientific Conference Electronics, ET 2017 - Proceedings},
+keywords = {Software anti-patterns,Software design patterns,Software engineering,Software refactoring},
+pages = {1--4},
+title = {{Improving the software quality - An educational approach}},
+volume = {2017-Janua},
+year = {2017}
}
@inproceedings{Baccanico2014,
abstract = {This paper discusses our preliminary analysis of event logging practices adopted in a large-scale industrial development process at Selex ES, a top-leading Finmeccanica company in electronic and information technologies for defense systems, aerospace, and land security. The analysis aims to support log reengineering activities that are currently conducted at SELEX ES. For the time being, some of the issues described in the paper have been fixed by system developers. The analysis encompasses in total around 50+ million lines of logs produced by an Air Traffic Control (ATC) system. It reveals that event logging is not strictly regulated by company-wide practices, which results in heterogeneous logs across different development teams. We introduce our ongoing effort at developing an automatic support to browse collected logs along with a uniform logging policy supplementing the reengineering process.},
@@ -1023,63 +904,6 @@ @inproceedings{Baccanico2014
title = {{Event Logging in an Industrial Development Process: Practices and Reengineering Challenges}},
year = {2014}
}
-@inproceedings{Kumar2017,
-author = {Kumar, Manoj and Meenu},
-booktitle = {2017 International conference of Electronics, Communication and Aerospace Technology (ICECA)},
-doi = {10.1109/ICECA.2017.8212820},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Kumar - 2017 - Web Log Expert Tool.pdf:pdf},
-isbn = {978-1-5090-5685-9},
-keywords = {web server log,web usage mining},
-month = {apr},
-pages = {296--301},
-publisher = {IEEE},
-title = {{Analysis of visitor's behavior from web log using web log expert tool}},
-year = {2017}
-}
-@article{Galster2019,
-abstract = {During software maintenance, developers have different information needs (e.g., to understand what type of maintenance activity to perform, the impact of a maintenance activity and its effort). However, information to support developers may be distributed across various sources. Furthermore, information captured in formal architecture documentation may be outdated.
In this paper, we put forward a late breaking idea and outline a solution to improve the productivity of developers by providing task-specific recommendations based on concrete information needs that arise during software maintenance.}, -author = {Galster, Matthias and Treude, Christoph and Blincoe, Kelly}, -doi = {10.1109/ICSME.2019.00060}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Supporting{\_}Software{\_}Architecture{\_}Maintenance{\_}by{\_}Providing{\_}Task-Specific{\_}Recommendations.pdf:pdf}, -isbn = {9781728130941}, -journal = {Proceedings - 2019 IEEE International Conference on Software Maintenance and Evolution, ICSME 2019}, -keywords = {Software maintenance,natural language processing,software architecture,text classification}, -pages = {370--372}, -publisher = {IEEE}, -title = {{Supporting Software Architecture Maintenance by Providing Task-Specific Recommendations}}, -year = {2019} -} -@inproceedings{Wiese2021, -abstract = {Technical Debts (TD) are problems of the internal software quality. They are often contracted due to tight project deadlines, for example quick fixes and workarounds, and can make future changes more costly or impossible. TD prevention should be more important than TD repayment, because subsequent refactoring and reengineering is usually more expensive than building the right solution from the beginning. While there are numerous works on TD repayment, solutions for TD prevention are understudied. This paper evaluates a framework that focuses on both TD prevention and TD repayment. It was developed by and applied in an IT unit of a publishing house. The unique contribution of this framework is the integration of TD management into project management. The evaluation was carried out by a study based on ticket statistics and a structured survey with participants from the observed IT unit and a comparison unit. The evaluation shows that the adoption of this framework leads to a raised awareness for the contraction of TD. This results in benefits like more rational discussions and decisions, TD prevention and timelier repayment of TD tickets.}, -archivePrefix = {arXiv}, -arxivId = {2103.10317}, -author = {Wiese, Marion and Riebisch, Matthias and Schwarze, Julian}, -booktitle = {2021 IEEE/ACM International Conference on Technical Debt (TechDebt)}, -doi = {10.1109/TechDebt52882.2021.00018}, -eprint = {2103.10317}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Preventing{\_}Technical{\_}Debt{\_}by{\_}Technical{\_}Debt{\_}Aware{\_}Project{\_}Management.pdf:pdf}, -isbn = {978-1-6654-1405-0}, -keywords = {Technical Debt,Technical Debt Aware Project Management,Technical Debt Awareness,Technical Debt Prevention,Technical Debt Repayment}, -month = {may}, -pages = {84--93}, -publisher = {IEEE}, -title = {{Preventing Technical Debt by Technical Debt Aware Project Management}}, -url = {http://arxiv.org/abs/2103.10317 https://ieeexplore.ieee.org/document/9462991/}, -year = {2021} -} -@article{Gralha2018, -abstract = {We use Grounded Theory to study the evolution of requirements practices of 16 software startups as they grow and introduce new products and services. These startups operate in a dynamic environment, with significant time and market pressure, and rarely have time for systematic requirements analysis. 
Our theory describes the evolution of practice along six dimensions that emerged as relevant to their requirements activities: requirements artefacts, knowledge management, requirements-related roles, planning, technical debt and product quality. Beyond the relationships among the dimensions, our theory also explains the turning points that drove the evolution along these dimensions. These changes are reactive, rather than planned, suggesting an overall pragmatic lightness, i.e., flexibility, in the startups' evolution towards engineering practices for requirements. Our theory organises knowledge about evolving requirements practice in maturing startups, and provides practical insights for startups' assessing their own evolution as they face challenges to their growth. Our research also suggests that a startup's evolution along the six dimensions is not fundamental to its success, but has significant effects on their product, their employees and the company.}, -author = {Gralha, Catarina and Damian, Daniela and Wasserman, Anthony I.Tony and Goul{\~{a}}o, Miguel and Ara{\'{u}}jo, Jo{\~{a}}o}, -doi = {10.1145/3180155.3180158}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Gralha et al. - 2018 - The evolution of requirements practices in software startups.pdf:pdf}, -isbn = {9781450356381}, -issn = {02705257}, -journal = {Proceedings - International Conference on Software Engineering}, -keywords = {Evolution,Grounded theory,Requirements engineering,Startups}, -pages = {823--833}, -title = {{The evolution of requirements practices in software startups}}, -year = {2018} -} @article{Potey2013, abstract = {Query log is the pouch of valuable information that records user's search queries and related actions on the internet. By mining the recorded information, it is possible to exploit the user's underlying goals, preferences, interests, search behaviors and implicit feedback. The wealth of mined information can be used in many applications such as query log analysis, query recommendation, query reformulation, query intent identification and many more to improve performance of search engine by providing more relevant results. Over the past decade, there has been tremendous work done for improving search engine results to flourish the users for searching. This paper reviews and compares some of the available methods to give an insight into the area of query log processing for information retrieval. Our approach classifies web query intent based on knowledge extraction from query log analysis. {\textcopyright} 2013 IEEE.}, author = {Potey, Madhuri A. and Patel, Dhanashri A. and Sinha, P. K.}, @@ -1093,6 +917,115 @@ @article{Potey2013 title = {{A survey of query log processing techniques and evaluation of web query intent identification}}, year = {2013} } +@article{Dhanalakshmi2016, +abstract = {The increased on-line applications are leading to exponential growth of the web content. Most of the business organizations are interested to know the web user behavior to enhance their business. In this context, users navigation in static and dynamic web applications plays an important role in understanding user's interests. The static mining techniques may not be suitable as it is for dynamic web log files and decision making. 
Traditional web log preprocessing approaches and weblog usage patterns have limitations to analyze the content relationship with the browsing history. This paper focuses on various static web log preprocessing and mining techniques and their applicable limitations for dynamic web mining.},
+author = {Dhanalakshmi, P. and Ramani, K. and Reddy, B. Eswara},
+doi = {10.1109/IACC.2016.35},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Dhanalakshmi, Ramani, Reddy - 2016 - The Research of Preprocessing and Pattern Discovery Techniques on Web Log Files(2).pdf:pdf},
+isbn = {9781467382861},
+journal = {Proceedings - 6th International Advanced Computing Conference, IACC 2016},
+keywords = {Association rules,Graph models,Navigation patterns,Static Logs,Web log},
+pages = {139--145},
+publisher = {IEEE},
+title = {{The Research of Preprocessing and Pattern Discovery Techniques on Web Log Files}},
+year = {2016}
}
+@inproceedings{Sinha2021,
+abstract = {We are very familiar with the phrase 'change is the only constant' and the same thing is applicable for the software industry also. In this new world of the software industry, most of the information technology companies are following a methodology named Agile, where the development work moves quickly. Nowadays very few companies are still following the traditional Waterfall model as their software development life cycle method. In the software development life cycle, one of the most important phases is the quality assurance or testing phase. In this context we will be discussing how software testing has been implemented and how it is going to work with the Agile methodology. Also, we will do a comparative analysis between the traditional Waterfall model testing approach and the Agile testing approach.},
+author = {Sinha, Abhiup and Das, Pallabi},
+booktitle = {2021 5th International Conference on Electronics, Materials Engineering {\&} Nano-Technology (IEMENTech)},
+doi = {10.1109/IEMENTech53263.2021.9614779},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Agile{\_}Methodology{\_}Vs.{\_}Traditional{\_}Waterfall{\_}SDLC{\_}A{\_}case{\_}study{\_}on{\_}Quality{\_}Assurance{\_}process{\_}in{\_}Software{\_}Industry.pdf:pdf},
+isbn = {978-1-6654-1803-4},
+keywords = {Agile,Software Development,Testing in Agile,Waterfall model},
+month = {sep},
+pages = {1--4},
+publisher = {IEEE},
+title = {{Agile Methodology Vs. Traditional Waterfall SDLC: A case study on Quality Assurance process in Software Industry}},
+url = {https://ieeexplore.ieee.org/document/9614779/},
+year = {2021}
}
+@article{Vaarandi2015,
+abstract = {Modern IT systems often produce large volumes of event logs, and event pattern discovery is an important log management task. For this purpose, data mining methods have been suggested in many previous works. In this paper, we present the LogCluster algorithm which implements data clustering and line pattern mining for textual event logs.
The paper also describes an open source implementation of LogCluster.}, +author = {Vaarandi, Risto and Pihelgas, Mauno}, +doi = {10.1109/CNSM.2015.7367331}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Vaarandi, Pihelgas - 2015 - LogCluster - A data clustering and pattern mining algorithm for event logs.pdf:pdf}, +isbn = {9783901882777}, +journal = {Proceedings of the 11th International Conference on Network and Service Management, CNSM 2015}, +keywords = {data clustering,data mining,event log analysis,event log clustering,mining patterns from event logs}, +pages = {1--7}, +title = {{LogCluster - A data clustering and pattern mining algorithm for event logs}}, +year = {2015} +} +@article{Shahid2016, +abstract = {Change impact is an important issue in software maintenance phase. As retesting is required over a software change, there is a need to keep track of software impact associated with changes. Even a small software change can ripple through to cause a large unintended impact elsewhere in the system that makes it difficult to identify the affected functionalities. The impact after changes demands for a special traceability approach. This paper presents a new approach and prototype tool, Hybrid Coverage Analysis Tool (HYCAT), as a proof of concept to support the software manager or maintainers to manage impact analysis and its related traceability before and after a change in any software artifact. The proposed approach was then evaluated using a case study, On-Board Automobile (OBA), and experimentation. The traceability output before and after changes were produced and analyzed to capture impact analysis. The results of the evaluation show that the proposed approach has achieved some promising output and remarkable understanding as compared to existing approaches.}, +author = {Shahid, Muhammad and Ibrahim, Suhaimi}, +doi = {10.1109/IBCAST.2016.7429908}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Shahid, Ibrahim - 2016 - Change impact analysis with a software traceability approach to support software maintenance.pdf:pdf}, +isbn = {9781467391276}, +journal = {Proceedings of 2016 13th International Bhurban Conference on Applied Sciences and Technology, IBCAST 2016}, +keywords = {impact analysis,software change,software maintenance,software traceability}, +pages = {391--396}, +title = {{Change impact analysis with a software traceability approach to support software maintenance}}, +year = {2016} +} +@article{Zhu2019, +abstract = {Logs are imperative in the development and maintenance process of many software systems. They record detailed runtime information that allows developers and support engineers to monitor their systems and dissect anomalous behaviors and errors. The increasing scale and complexity of modern software systems, however, make the volume of logs explodes. In many cases, the traditional way of manual log inspection becomes impractical. Many recent studies, as well as industrial tools, resort to powerful text search and machine learning-based analytics solutions. Due to the unstructured nature of logs, a first crucial step is to parse log messages into structured data for subsequent analysis. In recent years, automated log parsing has been widely studied in both academia and industry, producing a series of log parsers by different techniques. 
To better understand the characteristics of these log parsers, in this paper, we present a comprehensive evaluation study on automated log parsing and further release the tools and benchmarks for easy reuse. More specifically, we evaluate 13 log parsers on a total of 16 log datasets spanning distributed systems, supercomputers, operating systems, mobile systems, server applications, and standalone software. We report the benchmarking results in terms of accuracy, robustness, and efficiency, which are of practical importance when deploying automated log parsing in production. We also share the success stories and lessons learned in an industrial application at Huawei. We believe that our work could serve as the basis and provide valuable guidance to future research and deployment of automated log parsing.}, +archivePrefix = {arXiv}, +arxivId = {1811.03509}, +author = {Zhu, Jieming and He, Shilin and Liu, Jinyang and He, Pinjia and Xie, Qi and Zheng, Zibin and Lyu, Michael R.}, +doi = {10.1109/ICSE-SEIP.2019.00021}, +eprint = {1811.03509}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Zhu et al. - 2019 - Tools and Benchmarks for Automated Log Parsing.pdf:pdf}, +isbn = {9781728117607}, +journal = {Proceedings - 2019 IEEE/ACM 41st International Conference on Software Engineering: Software Engineering in Practice, ICSE-SEIP 2019}, +keywords = {AIOps,anomaly detection,log analysis,log management,log parsing}, +pages = {121--130}, +publisher = {IEEE}, +title = {{Tools and Benchmarks for Automated Log Parsing}}, +year = {2019} +} +@article{Booch1986, +abstract = {Object-oriented development is a partial-lifecycle software development method in which the decomposition of a system is based upon the concept of an object. This method is fundamentally different from traditional functional approaches to design and serves to help manage the complexity of massive software-intensive systems. The paper examines the process of object-oriented development as well as the influences upon this approach from advances in abstraction mechanisms, programming languages, and hardware. The concept of an object is central to object-oriented development and so the properties of an object are discussed in detail. The paper concludes with an examination of the mapping of object-oriented techniques to Ada{\textregistered} using a design case study. {\textcopyright} 1986 IEEE}, +author = {Booch, Grady}, +doi = {10.1109/TSE.1986.6312937}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Object-oriented{\_}development.pdf:pdf}, +issn = {00985589}, +journal = {IEEE Transactions on Software Engineering}, +keywords = {Abstract data type,Ada,object,object-oriented development,software development method}, +number = {2}, +pages = {211--221}, +publisher = {IEEE}, +title = {{Object-Oriented Development}}, +volume = {SE-12}, +year = {1986} +} +@article{Hoda2017, +abstract = {Context A number of systematic literature reviews and mapping studies (SLRs) covering numerous primary research studies on various aspects of agile software development (ASD) exist. Objective The aim of this paper is to provide an overview of the SLRs on ASD research topics for software engineering researchers and practitioners. Method We followed the tertiary study guidelines by Kitchenham et al. to find SLRs published between late 1990s to December 2015. 
Results We found 28 SLRs focusing on ten different ASD research areas: adoption, methods, practices, human and social aspects, CMMI, usability, global software engineering (GSE), organizational agility, embedded systems, and software product line engineering. The number of SLRs on ASD topics, similar to those on software engineering (SE) topics in general, is on the rise. A majority of the SLRs applied standardized guidelines and the quality of these SLRs on ASD topics was found to be slightly higher for journal publications than for conferences. While some individuals and institutions seem to lead this area, the spread of authors and institutions is wide. With respect to prior review recommendations, significant progress was noticed in the area of connecting agile to established domains such as usability, CMMI, and GSE; and considerable progress was observed in focusing on management-oriented approaches as Scrum and sustaining ASD in different contexts such as embedded systems. Conclusion SLRs of ASD studies are on the rise and cover a variety of ASD aspects, ranging from early adoption issues to newer applications of ASD such as in product line engineering. ASD research can benefit from further primary and secondary studies on evaluating benefits and challenges of ASD methods, agile hybrids in large-scale setups, sustainability, motivation, teamwork, and project management; as well as a fresh review of empirical studies in ASD to cover the period post 2008.}, +author = {Hoda, Rashina and Salleh, Norsaremah and Grundy, John and Tee, Hui Mien}, +doi = {10.1016/j.infsof.2017.01.007}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Systematic{\_}literature{\_}reviews{\_}in{\_}global{\_}software{\_}development{\_}A{\_}tertiary{\_}study.pdf:pdf}, +isbn = {9781849195416}, +issn = {09505849}, +journal = {Information and Software Technology}, +keywords = {Agile software development,Mapping study,Systematic literature reviews,Tertiary study}, +pages = {60--70}, +title = {{Systematic literature reviews in agile software development: A tertiary study}}, +volume = {85}, +year = {2017} +} +@article{Razavi2008, +abstract = {The component-based nature of large industrial software systems that consist of a number of diverse collaborating applications, pose significant challenges with respect to system maintenance, monitoring, auditing, and diagnosing. In this context, a monitoring and diagnostic system interprets log data to recognize patterns of significant events that conform to specific Threat Models. Threat Models have been used by the software industry for analyzing and documenting a system's risks in order to understand a system's threat profile. In this paper, we propose a framework whereby patterns of significant events are represented as expressions of a specialized monitoring language that are used to annotate specific threat models. An approximate matching technique that is based on the Viterbi algorithm is then used to identify whether system generated events, fit the given patterns. The technique has been applied and evaluated considering threat models and monitoring policies in logs that have been obtained from multi-user MS-Windows{\textcopyright} based systems. 
{\textcopyright} 2008 IEEE.}, +author = {Razavi, Ali and Kontogiannis, Kostas}, +doi = {10.1109/COMPSAC.2008.81}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Razavi, Kontogiannis - 2008 - Pattern and policy driven log analysis for software monitoring.pdf:pdf}, +isbn = {9780769532622}, +issn = {07303157}, +journal = {Proceedings - International Computer Software and Applications Conference}, +pages = {108--111}, +title = {{Pattern and policy driven log analysis for software monitoring}}, +year = {2008} +} @article{Huser2012, abstract = {Process Mining: Discovery, Conformance and Enhancement of Business Processes Considerations}, author = {Huser, Vojtech}, @@ -1106,31 +1039,30 @@ @article{Huser2012 volume = {45}, year = {2012} } -@article{Dwyer2013, -abstract = {Security is one of the biggest concerns of any company that has an IT infrastructure. Windows event logs are a very useful source of data for security information, but sometimes can be nearly impossible to use due to the complexity of log data or the number of events generated per minute. For this reason, event log data must be automatically processed so that an administrator is given a list of events that actually need the administrator's attention. This has been standard in intrusion detection systems for many years to find anomalies in network traffic, but has not been common in event log processing. This paper will adapt these intrusion detection techniques for Windows event log data sets to find anomalies in these log data sets. ?? 2013 ICST.}, -author = {Dwyer, John and Truta, Traian Marius}, -doi = {10.4108/icst.collaboratecom.2013.254136}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Dwyer, Truta - 2013 - Finding anomalies in windows event logs using standard deviation.pdf:pdf}, -isbn = {9781936968923}, -journal = {Proceedings of the 9th IEEE International Conference on Collaborative Computing: Networking, Applications and Worksharing, COLLABORATECOM 2013}, -keywords = {Anomaly Detection,Standard Deviation,Windows Event Logs}, -pages = {563--570}, -title = {{Finding anomalies in windows event logs using standard deviation}}, -year = {2013} -} -@article{Tian2017, -abstract = {A web service reliability test method for C/S architecture software based on log analysis is presented in this paper. In this method, the software usage model is constructed automatically to describe the real situation on the users' access to the web service by Markov chain. The test cases are generated according to Random Walk and applied to software reliability test. In the experiment process, MTBF (focusing on server crash) was chosen to be the software reliability evaluation index. Through the testing and analysis of a real web software, MTBF obtained by testing result is similar to that from the realistic log, and the web service reliability test method is validated.}, -author = {Tian, Xuetao and Li, Honghui and Liu, Feng}, -doi = {10.1109/QRS-C.2017.38}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Tian, Li, Liu - 2017 - Web Service Reliability Test Method Based on Log Analysis.pdf:pdf}, +@article{Hemmati2017, +abstract = {In digital health communication domain, the software is usually offered as a service (SaaS) which mandates attracting customers' satisfaction within each use of service. We report a case study in this domain which provides secure web based mail services that establish a secure bridge between patients and medical personnel. 
In this context, we explore service usage to guide value prediction for existing and prospective customers. Understanding the behavior of clients can help optimize services by providing them to users based on past usage trends or patterns. Therefore, service usage analysis in digital health products can pave the road for efficient communications and feature utilization in software in the health-care market.},
+author = {Hemmati, Ashkan and Carlson, Chris and Nayebi, Maleknaz and Ruhe, Guenther and Saunders, Chad},
+doi = {10.1109/QRS-C.2017.95},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Analysis{\_}of{\_}Software{\_}Service{\_}Usage{\_}in{\_}Healthcare{\_}Communication{\_}Services.pdf:pdf},
isbn = {9781538620724},
journal = {Proceedings - 2017 IEEE International Conference on Software Quality, Reliability and Security Companion, QRS-C 2017},
+keywords = {Feature usage analysis,product value estimation,requirement engineering,software as a service},
+pages = {565--566},
publisher = {IEEE},
+title = {{Analysis of Software Service Usage in Healthcare Communication Services}},
year = {2017}
}
+@article{Alenezi2016,
+abstract = {Throughout the software evolution, several maintenance actions such as adding new features, fixing problems, improving the design might negatively or positively affect the software design quality. Quality degradation, if not handled in the right time, can accumulate and cause serious problems for future maintenance effort. Several researchers considered modularity as one of the success factors of Open Source Software (OSS) Projects. The modularity of these systems is influenced by some software metrics such as size, complexity, cohesion, and coupling. In this work, we study the modularity evolution of four open-source systems by answering two main research questions, namely: what measures can be used to measure the modularity level of software and, secondly, did the modularity level for the selected open source software improve over time. By investigating the modularity measures, we have identified the main measures that can be used to measure software modularity. Based on our analysis, the modularity of these two systems is not improving over time. However, the defect density is improving over time.},
+author = {Alenezi, Mamdouh and Zarour, Mohammad},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Alenezi, Zarour - 2016 - Does Software Structures Quality Improve over Software Evolution Evidences from Open - Source Projects.pdf:pdf},
+journal = {Special issue on “Computing Applications and Data Mining”, International Journal of Computer Science and Information Security (IJCSIS)},
+number = {1},
+pages = {61--75},
+title = {{Does Software Structures Quality Improve over Software Evolution?
Evidences from Open-Source Projects}},
+volume = {14},
+year = {2016}
}
@article{Araujo2021,
abstract = {The increasing use of computational systems has highlighted concerns about attributes that may influence the quality of service, such as performance, availability, reliability, and maintenance capacity. Failures in the software development process may impact these attributes. Flawed code and overall software misdesign may cause internal errors, leading to system malfunction. Some errors might be identified and fixed during the software testing process. However, other errors may manifest only during the production stage. This is the case of the software aging phenomenon, which is related to the progressive degradation that a software performance or reliability suffers during its operational life. This paper proposes a methodology for software maintenance that is tailored to identify, correct, and mitigate the software aging effects. If the source code can be modified and a new version deployed with minimal impact, then data from aging detection is used for corrective maintenance, i.e., for fixing the bug that causes the aging effects. If the software cannot be fixed nor its version updated without long system interruption or other bad consequences, then our approach can mitigate the aging effects, in a preventive maintenance to avoid service outages. The proposed methodology is validated through both Stochastic Petri Net (SPN) models and experiments in a controlled environment. The model evaluation considering a hybrid maintenance routine (preventive and corrective) yielded an availability of 99.82{\%}, representing an annual downtime of 15.9 hours. By contrast, the baseline scenario containing only reactive maintenance (i.e., repairing only after failure) had more than 1342 hours of annual downtime, 80 times higher than the proposed approach.},
author = {Araujo, Jean and Melo, Carlos and Oliveira, Felipe and Pereira, Paulo and Matos, Rubens},
@@ -1142,44 +1074,56 @@ @article{Araujo2021
title = {{A Software Maintenance Methodology: An Approach Applied to Software Aging}},
year = {2021}
}
-@article{Hoda2017,
-abstract = {Context A number of systematic literature reviews and mapping studies (SLRs) covering numerous primary research studies on various aspects of agile software development (ASD) exist. Objective The aim of this paper is to provide an overview of the SLRs on ASD research topics for software engineering researchers and practitioners. Method We followed the tertiary study guidelines by Kitchenham et al. to find SLRs published between late 1990s to December 2015. Results We found 28 SLRs focusing on ten different ASD research areas: adoption, methods, practices, human and social aspects, CMMI, usability, global software engineering (GSE), organizational agility, embedded systems, and software product line engineering. The number of SLRs on ASD topics, similar to those on software engineering (SE) topics in general, is on the rise. A majority of the SLRs applied standardized guidelines and the quality of these SLRs on ASD topics was found to be slightly higher for journal publications than for conferences. While some individuals and institutions seem to lead this area, the spread of authors and institutions is wide.
With respect to prior review recommendations, significant progress was noticed in the area of connecting agile to established domains such as usability, CMMI, and GSE; and considerable progress was observed in focusing on management-oriented approaches as Scrum and sustaining ASD in different contexts such as embedded systems. Conclusion SLRs of ASD studies are on the rise and cover a variety of ASD aspects, ranging from early adoption issues to newer applications of ASD such as in product line engineering. ASD research can benefit from further primary and secondary studies on evaluating benefits and challenges of ASD methods, agile hybrids in large-scale setups, sustainability, motivation, teamwork, and project management; as well as a fresh review of empirical studies in ASD to cover the period post 2008.}, -author = {Hoda, Rashina and Salleh, Norsaremah and Grundy, John and Tee, Hui Mien}, -doi = {10.1016/j.infsof.2017.01.007}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Systematic{\_}literature{\_}reviews{\_}in{\_}global{\_}software{\_}development{\_}A{\_}tertiary{\_}study.pdf:pdf}, -isbn = {9781849195416}, -issn = {09505849}, -journal = {Information and Software Technology}, -keywords = {Agile software development,Mapping study,Systematic literature reviews,Tertiary study}, -pages = {60--70}, -title = {{Systematic literature reviews in agile software development: A tertiary study}}, -volume = {85}, +@article{Port2017, +abstract = {NASA has been successfully sustaining the continuous operation of its critical navigation software systems for over 12 years. To accomplish this, NASA scientists must continuously monitor their process, report on current system quality, forecast maintenance effort, and sustain required staffing levels. This report presents some examples of the use of a robust software metrics and analytics program that enables actionable strategic maintenance management of a critical system (Monte) in a timely, economical, and risk-controlled fashion. This article is part of a special issue on Actionable Analytics for Software Engineering.}, +author = {Port, Dan and Taber, Bill}, +doi = {10.1109/MS.2017.4541055}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Port, Taber - 2017 - Actionable Analytics for Strategic Maintenance of Critical Software An Industry Experience Report.pdf:pdf}, +issn = {07407459}, +journal = {IEEE Software}, +keywords = {Monte,NASA,critical systems,navigation systems,reliability,software analytics,software development,software engineering,software maintenance}, +number = {1}, +pages = {58--63}, +publisher = {IEEE}, +title = {{Actionable Analytics for Strategic Maintenance of Critical Software: An Industry Experience Report}}, +volume = {35}, year = {2017} } -@article{Garlan1999, -abstract = {Over the past decade software architecture has received increasing attention as an important subfield of software engineering. During that time there has been considerable progress in developing the technological and methodological base for treating architectural design as an engineering discipline. However, much remains to be done to achieve that goal. Moreover, the changing face of technology raises a number of new challenges for software architecture. 
This paper examines some of the important trends of software architecture in research and practice, and speculates on the important emerging trends, challenges, and aspirations.},
-author = {Garlan, David},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Garlan - 1999 - Software Architecture a Roadmap David Garlan.pdf:pdf},
-isbn = {1581132530},
-journal = {Design},
-keywords = {software,software architecture,software design},
-title = {{Software Architecture: a Roadmap David Garlan}},
-year = {1999}
+@inproceedings{Rong2020,
+abstract = {Background: Logs provide crucial information to understand the dynamic behavior of software systems in modern software development and maintenance. Usually, logs are produced by log statements which will be triggered and executed under certain conditions. However, current studies paid very limited attention to developers' Intentions and Concerns (IC) on logging practice, leading to uncertainty about whether the developers' IC are properly reflected by log statements and about the capability to capture the expected information of system behaviors in logs. Objective: This study aims to reveal the status of developers' IC on logging practice and, more importantly, how the IC are properly reflected in software source code in real-world software development. Method: We collected evidence from two sources, a series of interviews and source code analysis, both conducted in a big-data company, followed by consolidation and analysis of the evidence. Results: Major gaps and inconsistencies have been identified between the developers' IC and real log statements in source code. Many code snippets contained no log statements that the interviewees claimed to have inserted. Conclusion: Developers' original IC towards logging practice are usually poorly realized, which inevitably impacted the motivation and purpose to conduct this practice.},
+author = {Rong, Guoping and Xu, Yangchen and Gu, Shenghui and Zhang, He and Shao, Dong},
+booktitle = {Proceedings - 2020 IEEE International Conference on Software Maintenance and Evolution, ICSME 2020},
+doi = {10.1109/ICSME46990.2020.00012},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Can{\_}You{\_}Capture{\_}Information{\_}As{\_}You{\_}Intend{\_}To{\_}A{\_}Case{\_}Study{\_}on{\_}Logging{\_}Practice{\_}in{\_}Industry.pdf:pdf},
+isbn = {9781728156194},
+keywords = {developer,inconsistencies,intentions and concerns,logging practice},
+month = {sep},
+pages = {12--22},
+publisher = {Institute of Electrical and Electronics Engineers Inc.},
+title = {{Can You Capture Information As You Intend To? A Case Study on Logging Practice in Industry}},
+year = {2020}
+}
+@article{IEEEStandard2011,
+author = {{IEEE Standard}},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/ISOIECIEEE{\textcopyright} Std. 42012011 - 2011 - INTERNATIONAL STANDARD ISO IEC IEEE Systems and software engineering — agile environment.pdf:pdf},
+journal = {IEEE Standards},
+title = {{ISO/IEC/IEEE Systems and software engineering - Architecture description}},
+volume = {2011},
+year = {2011}
}
-@article{Booch1986,
-abstract = {Object-oriented development is a partial-lifecycle software development method in which the decomposition of a system is based upon the concept of an object. This method is fundamentally different from traditional functional approaches to design and serves to help manage the complexity of massive software-intensive systems. 
The paper examines the process of object-oriented development as well as the influences upon this approach from advances in abstraction mechanisms, programming languages, and hardware. The concept of an object is central to object-oriented development and so the properties of an object are discussed in detail. The paper concludes with an examination of the mapping of object-oriented techniques to Ada{\textregistered} using a design case study. {\textcopyright} 1986 IEEE},
-author = {Booch, Grady},
-doi = {10.1109/TSE.1986.6312937},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Object-oriented{\_}development.pdf:pdf},
-issn = {00985589},
-journal = {IEEE Transactions on Software Engineering},
-keywords = {Abstract data type,Ada,object,object-oriented development,software development method},
-number = {2},
-pages = {211--221},
+@article{Ren2011,
+abstract = {Software maintenance is an important stage of the software life cycle. Starting from the problems of software maintenance process models, this study researches software maintenance process models, focusing on the maintenance activities between software delivery and retirement. Firstly, it describes the eight software maintenance processes; then it examines the structure and content of software maintenance process models, covering four models: the rapid change model, the Boehm model, the IEEE model, and the iterative enhancement model; finally, it analyzes and contrasts the four software maintenance process models, providing a basis for model selection. This study covers the improvement of the software maintenance process, guiding maintenance activities, improving the quality of software maintenance, and ensuring the normal operation of application software, which has important theoretical and practical significance. {\textcopyright} 2011 IEEE.},
+author = {Ren, Yongchang and Tao, Xing and Liu, Zhongjing and Chen, Xiaoji},
+doi = {10.1109/ICIII.2011.324},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Yongchang et al. - 2011 - Software Maintenance Process Model and Contrastive Analysis.pdf:pdf},
+isbn = {9780769545233},
+journal = {Proceedings - 2011 4th International Conference on Information Management, Innovation Management and Industrial Engineering, ICIII 2011},
+keywords = {Comparative analysis,Maintenance process,Process model,Software maintenance},
+pages = {169--172},
publisher = {IEEE},
-title = {{Object-Oriented Development}},
-volume = {SE-12},
-year = {1986}
+title = {{Software maintenance process model and contrastive analysis}},
+volume = {3},
+year = {2011}
}
@article{Levin2019,
abstract = {Lehman's Laws teach us that a software system will become progressively less satisfying to its users over time, unless it is continually adapted to meet new needs. A line of previous works sought to better understand software maintenance by studying how commits can be classified into three main software maintenance activities. Corrective: fault fixing; Perfective: system improvements; Adaptive: new feature introduction. In this work we suggest visualizations for exploring software maintenance activities in both project and individual developer scopes. We demonstrate our approach using a prototype we have built using the Shiny R framework. In addition, we have also published our prototype as an online demo. This demo allows users to explore the maintenance activities of a number of popular open source projects. We believe that the visualizations we provide can assist practitioners in monitoring and maintaining the health of software projects. 
In particular, they can be useful for identifying general imbalances, peaks, deeps and other anomalies in projects' and developers' maintenance activities.}, @@ -1197,84 +1141,155 @@ @article{Levin2019 title = {{Visually exploring software maintenance activities}}, year = {2019} } -@article{Kumar2013, -abstract = {Purpose - The purpose of this paper is to provide an overview of research and development in the measurement of maintenance performance. It considers the problems of various measuring parameters and comments on the lack of structure in and references for the measurement of maintenance performance. The main focus is to determine how value can be created for organizations by measuring maintenance performance, examining such maintenance strategies as condition-based maintenance, reliability-centred maintenance, e-maintenance, etc. In other words, the objectives are to find frameworks or models that can be used to evaluate different maintenance strategies and determine the value of these frameworks for an organization. Design/methodology/approach - A state-of-the-art literature review has been carried out to answer the following two research questions. First, what approaches and techniques are used for maintenance performance measurement (MPM) and which MPM techniques are optimal for evaluating maintenance strategies? Second, in general, how can MPM create value for organizations and, more specifically, which system of measurement is best for which maintenance strategy? Findings - The body of knowledge on maintenance performance is both quantitatively and qualitatively based. Quantitative approaches include economic and technical ratios, value-based and balanced scorecards, system audits, composite formulations, and statistical and partial maintenance productivity indices. Qualitative approaches include human factors, amongst other aspects. Qualitatively based approaches are adopted because of the inherent limitations of effectively measuring a complex function such as maintenance through quantitative models. Maintenance decision makers often come to the best conclusion using heuristics, backed up by qualitative assessment, supported by quantitative measures. Both maintenance performance perspectives are included in this overview. Originality/value - A comprehensive review of maintenance performance metrics is offered, aiming to give, in a condensed form, an extensive introduction to MPM and a presentation of the state of the art in this field. {\textcopyright} Emerald Group Publishing Limited.}, -author = {Kumar, Uday and Galar, Diego and Parida, Aditya and Stenstr{\"{o}}m, Christer and Berges, Luis}, -doi = {10.1108/JQME-05-2013-0029}, -editor = {Kumar, Uday}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Kumar et al. - 2013 - Maintenance performance metrics A state-of-the-art review.pdf:pdf}, -issn = {1355-2511}, -journal = {Journal of Quality in Maintenance Engineering}, -keywords = {Framework,Hierarchy,Indicators,Key performance indicators,Maintenance,Maintenance performance measurement,Metrics,Performance,Performance measurement}, -month = {aug}, -number = {3}, -pages = {233--277}, -title = {{Maintenance performance metrics: a state‐of‐the‐art review}}, -volume = {19}, +@article{Zhuo1993, +abstract = {Software metrics are used to quantitatively characterize the essential features of software. The paper investigates the use of metrics in assessing software maintainability by presenting and comparing seven software maintainability assessment models. 
Eight software systems were used for initial construction and calibrating the automated assessment models, and an additional six software systems were used for testing the results. A comparison was made between expert software engineers' subjective assessment of the 14 individual software systems and the maintainability indices calculated by the seven models based on complexity metrics automatically derived from those systems. Initial tests show very high correlations between the automated assessment techniques and the subjective expert evaluations.}, +author = {Zhuo, Fang and Lowther, Bruce and Oman, Paul and Hagemeister, Jack}, +doi = {10.1109/METRIC.1993.263800}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Constructing{\_}and{\_}testing{\_}software{\_}maintainability{\_}assessment{\_}models.pdf:pdf;:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Zhuo et al. - 1993 - Constructing and testing software maintainability assessment models.pdf:pdf}, +isbn = {0818637404}, +journal = {Proceedings - 1st International Software Metrics Symposium, METRIC 1993}, +pages = {61--70}, +title = {{Constructing and testing software maintainability assessment models}}, +year = {1993} +} +@article{Syer2013, +abstract = {Load tests ensure that software systems are able to perform under the expected workloads. The current state of load test analysis requires significant manual review of performance counters and execution logs, and a high degree of system-specific expertise. In particular, memory-related issues (e.g., memory leaks or spikes), which may degrade performance and cause crashes, are difficult to diagnose. Performance analysts must correlate hundreds of megabytes or gigabytes of performance counters (to understand resource usage) with execution logs (to understand system behaviour). However, little work has been done to combine these two types of information to assist performance analysts in their diagnosis. We propose an automated approach that combines performance counters and execution logs to diagnose memory-related issues in load tests. We perform three case studies on two systems: one open-source system and one large-scale enterprise system. Our approach flags {\textless}0.1{\%} of the execution logs with a precision {\textgreater}80{\%}. {\textcopyright} 2013 IEEE.}, +author = {Syer, Mark D. and Jiang, Zhen Ming and Nagappan, Meiyappan and Hassan, Ahmed E. and Nasser, Mohamed and Flora, Parminder}, +doi = {10.1109/ICSM.2013.22}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Syer et al. - 2013 - Leveraging performance counters and execution logs to diagnose memory-related performance issues.pdf:pdf}, +journal = {IEEE International Conference on Software Maintenance, ICSM}, +keywords = {Execution Logs,Load Testing,Performance Counters,Performance Engineering}, +pages = {110--119}, +publisher = {IEEE}, +title = {{Leveraging performance counters and execution logs to diagnose memory-related performance issues}}, year = {2013} } -@article{Krol2008, -author = {Krol, D and Scigajlo, M and Trawi{\'{n}}ski, Bogda}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Wang et al. 
- 2017 - Constructing and testing software maintainability assessment models.pdf:pdf},
-isbn = {9781424420964},
-journal = {Machine Learning},
-keywords = {clustering,hcm,server log,user activity,web system},
-number = {July},
-pages = {12--15},
-title = {{I Vestigatio of I Ter Et System User Behaviour Usi G Cluster a Alysis}},
-year = {2008}
+@article{Pathan2014,
+abstract = {Data mining is the process of finding correlations in relational databases. There are different techniques for identifying malicious database transactions. Many existing approaches profile SQL query structures and database user activities to detect intrusions; the log mining approach automatically identifies anomalous database transactions. Data mining is very helpful to end users for extracting useful business information from large databases. Multi-level and multi-dimensional data mining are employed to discover data item dependency rules, data sequence rules, domain dependency rules, and domain sequence rules from the database log containing legitimate transactions. Database transactions that do not comply with the rules are identified as malicious transactions. The log mining approach can achieve the desired true and false positive rates when the confidence and support are set up appropriately. The implemented system incrementally maintains the data dependency rule sets and optimizes the performance of the intrusion detection process. {\textcopyright} 2014 IEEE.},
+author = {Pathan, Apashabi Chandkhan and Potey, Madhuri A.},
+doi = {10.1109/ICESC.2014.50},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Detection{\_}of{\_}Malicious{\_}Transaction{\_}in{\_}Database{\_}Using{\_}Log{\_}Mining{\_}Approach.pdf:pdf},
+isbn = {9781479921027},
+journal = {Proceedings - International Conference on Electronic Systems, Signal Processing, and Computing Technologies, ICESC 2014},
+keywords = {Data Mining,Database security,Intrusion Detection},
+pages = {262--265},
+publisher = {IEEE},
+title = {{Detection of malicious transaction in database using log mining approach}},
+year = {2014}
}
-@article{VanDerAalst2011,
-abstract = {Process mining techniques enable process-centric analytics through automated process discovery, conformance checking, and model enhancement. {\textcopyright} 1970-2012 IEEE.},
-author = {{Van Der Aalst}, W.},
-doi = {10.1109/MC.2011.384},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Aalst - 2011 - Using Process Mining to Bridge the Gap between BI and BPM.pdf:pdf},
-issn = {00189162},
-journal = {Computer},
-keywords = {Business intelligence,Business process management,Discovery analytics,Process mining},
-number = {12},
-pages = {77--80},
-title = {{Using process mining to bridge the gap between BI and BPM}},
-volume = {44},
-year = {2011}
+@article{Garlan1999,
+abstract = {Over the past decade software architecture has received increasing attention as an important subfield of software engineering. During that time there has been considerable progress in developing the technological and methodological base for treating architectural design as an engineering discipline. However, much remains to be done to achieve that goal. Moreover, the changing face of technology raises a number of new challenges for software architecture. 
This paper examines some of the important trends of software architecture in research and practice, and speculates on the important emerging trends, challenges, and aspirations.},
+author = {Garlan, David},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Garlan - 1999 - Software Architecture a Roadmap David Garlan.pdf:pdf},
+isbn = {1581132530},
+journal = {Design},
+keywords = {software,software architecture,software design},
+title = {{Software Architecture: A Roadmap}},
+year = {1999}
}
-@article{DeLeon-Sigg2020,
-abstract = {Technical debt concept has been in use since the 90s's decade. Several processes, techniques and tools, such as those related with software maintenance and risk control, are used to manage, prevent, measure and reduce technical debt. Technical debt management includes activities to identify, measure, prioritize, repay, and monitor it, but one of the main issues related with management resides in the complexity to make technical debt visible to organizations. In this paper is presented the application of the Normative Process Framework to make technical debt visible with a large system developed by students of software engineering. The Normative Process Framework is used in conjunction with a process to find technical debt and document it in a simple format. Results show how technical debt was made visible for that system in a simplified way, by using documentation generated during development, and considering not only code, but also other software assets. Once technical debt is made visible is easier to evaluate and prioritize it, to establish a convenient set of actions to control it.},
-author = {{De Leon-Sigg}, Maria and Vazquez-Reyes, Sodel and Rodriguez-Avila, Daniel},
-doi = {10.1109/CONISOFT50191.2020.00022},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/De Leon-Sigg, Vazquez-Reyes, Rodriguez-Avila - 2020 - Towards the use of a framework to make technical debt visible.pdf:pdf;:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Towards{\_}the{\_}Use{\_}of{\_}a{\_}Framework{\_}to{\_}Make{\_}Technical{\_}Debt{\_}Visible.pdf:pdf},
-isbn = {9781728184500},
-journal = {Proceedings - 2020 8th Edition of the International Conference in Software Engineering Research and Innovation, CONISOFT 2020},
-keywords = {Framework,Technical debt,Technical debt management,Technical debt visibility},
-pages = {86--92},
-title = {{Towards the use of a framework to make technical debt visible}},
-year = {2020}
+@inproceedings{Jailia2016,
+author = {Jailia, Manisha and Kumar, Ashok and Agarwal, Manisha and Sinha, Isha},
+booktitle = {2016 International Conference on ICT in Business Industry {\&} Government (ICTBIG)},
+doi = {10.1109/ICTBIG.2016.7892651},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Behavior{\_}of{\_}MVC{\_}Model{\_}View{\_}Controller{\_}based{\_}Web{\_}Application{\_}developed{\_}in{\_}PHP{\_}and{\_}.NET{\_}framework.pdf:pdf},
+isbn = {978-1-5090-5515-9},
+pages = {1--5},
+publisher = {IEEE},
+title = {{Behavior of MVC (Model View Controller) based Web Application developed in PHP and .NET framework}},
+year = {2016}
}
-@article{Dhanalakshmi2016,
-abstract = {The increased on-line applications are leading to exponential growth of the web content. Most of the business organizations are interested to know the web user behavior to enhance their business. In this context, users navigation in static and dynamic web applications plays an important role in understanding user's interests. 
The static mining techniques may not be suitable as it is for dynamic web log files and decision making. Traditional web log preprocessing approaches and weblog usage patterns have limitations to analyze the content relationship with the browsing history This paper, focuses on various static web log preprocessing and mining techniques and their applicable limitations for dynamic web mining.},
-author = {Dhanalakshmi, P. and Ramani, K. and Reddy, B. Eswara},
-doi = {10.1109/IACC.2016.35},
-file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Dhanalakshmi, Ramani, Reddy - 2016 - The Research of Preprocessing and Pattern Discovery Techniques on Web Log Files(2).pdf:pdf},
-isbn = {9781467382861},
-journal = {Proceedings - 6th International Advanced Computing Conference, IACC 2016},
-keywords = {Asscociation rules,Graph models,Navigation patterns,Static Logs,Web log},
-pages = {139--145},
+@article{Ping2010,
+abstract = {Software maintainability is one important aspect in the evaluation of the evolution of a software product. Due to the complexity of tracking maintenance behaviors, it is difficult to accurately predict the cost and risk of maintenance after delivery of software products. In an attempt to address this issue quantitatively, software maintainability is viewed as an inevitable evolution process driven by maintenance behaviors, given a health index at the time when a software product is delivered. A Hidden Markov Model (HMM) is used to simulate the maintenance behaviors, represented as their possible occurrence probabilities. Software metrics measure the quality of a software product, and the measurement results of a product being delivered are combined to form the health index of the product. The health index works as a weight on the process of maintenance behavior over time. When the occurrence probabilities of maintenance behaviors reach a certain number, which is reckoned as an indication of the deterioration status of a software product, the product can be regarded as obsolete. The longer the time, the better the maintainability. {\textcopyright} 2010 IEEE.},
+author = {Ping, Liang},
+doi = {10.1109/IFITA.2010.294},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Ping - 2010 - A quantitative approach to software maintainability prediction.pdf:pdf},
+isbn = {9780769541150},
+journal = {Proceedings - 2010 International Forum on Information Technology and Applications, IFITA 2010},
+keywords = {Hidden markov model,Software maintainability,Software metrics},
+pages = {105--108},
publisher = {IEEE},
-title = {{The Research of Preprocessing and Pattern Discovery Techniques on Web Log Files}},
+title = {{A quantitative approach to software maintainability prediction}},
+volume = {1},
+year = {2010}
+}
+@article{Kherbouche2017,
+abstract = {It is widely observed that the poor quality of event logs poses a significant challenge to the process mining project, both in terms of the choice of process mining algorithms and in terms of the quality of the discovered process model. Therefore, it is important to control the quality of event logs prior to conducting a process mining analysis. In this paper, we propose a qualitative model which aims to assess the quality of event logs before applying process mining algorithms. 
Our ultimate goal is to give process mining practitioners an overview of the quality of event logs which can help to indicate whether the event log quality is good enough to proceed to process mining and in this case, to suggest both the needed preprocessing steps and the process mining algorithm that is most tailored under such a circumstance. The qualitative model has been evaluated using both artificial and real-life case studies.}, +author = {Kherbouche, Mohammed Oussama and Laga, Nassim and Masse, Pierre Aymeric}, +doi = {10.1109/SSCI.2016.7849946}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Kherbouche, Laga, Masse - 2017 - Towards a better assessment of event logs quality.pdf:pdf}, +isbn = {9781509042401}, +journal = {2016 IEEE Symposium Series on Computational Intelligence, SSCI 2016}, +keywords = {event logs,process mining,process mining algorithms,qualitative model}, +publisher = {IEEE}, +title = {{Towards a better assessment of event logs quality}}, +year = {2017} +} +@inproceedings{Bogatinovski2023, +abstract = {Logging in software development plays a crucial role in bug-fixing, maintaining the code and operating the application. Logs are hints created by human software developers that aim to help human developers and operators in identifying root causes for application bugs or other misbehaviour types. They also serve as a bridge between the Devs and the Ops, allowing the exchange of information. The rise of the DevOps paradigm with the CI/CD pipelines led to a significantly higher number of deployments per month and consequently increased the logging requirements. In response, AI-enabled methods for IT operation (AIOps) are introduced to automate the testing and run-time fault tolerance to a certain extent. However, using logs tailored for human understanding to learn (automatic) AI methods poses an ill-defined problem: AI algorithms need no hints but structured, precise and indicative data. Until now, AIOps researchers adapt the AI algorithms to the properties of the existing human-centred data (e.g., log sentiment), which are not always trivial to model. By pointing out the discrepancy, we envision that there exists an alternative approach: the logging can be adapted such that the produced logs are better tailored towards the strengths of the AI-enabled methods. In response, in this vision paper, we introduce auto-logging, which devises the idea of how to automatically insert log instructions into the code that can better suit AI-enabled methods as end-log consumers.}, +author = {Bogatinovski, Jasmin and Kao, Odej}, +booktitle = {2023 IEEE/ACM 45th International Conference on Software Engineering: New Ideas and Emerging Results (ICSE-NIER)}, +doi = {10.1109/ICSE-NIER58687.2023.00023}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Auto-Logging{\_}AI-centred{\_}Logging{\_}Instrumentation.pdf:pdf}, +isbn = {979-8-3503-0039-0}, +issn = {02705257}, +keywords = {AIOps,logging,software engineering}, +month = {may}, +pages = {95--100}, +publisher = {IEEE}, +title = {{Auto-Logging: AI-centred Logging Instrumentation}}, +url = {https://ieeexplore.ieee.org/document/10173904/}, +year = {2023} +} +@article{Vijayasarathy2016, +abstract = {Organizations can choose from software development methodologies ranging from traditional to agile approaches. Researchers surveyed project managers and other team members about their choice of methodologies. 
The results indicate that although agile methodologies such as Agile Unified Process and Scrum are more prevalent than 10 years ago, traditional methodologies, including the waterfall model, are still popular. Organizations are also taking a hybrid approach, using multiple methodologies on projects. Furthermore, their choice of methodologies is associated with certain organizational, project, and team characteristics.}, +author = {Vijayasarathy, Leo R. and Butler, Charles W.}, +doi = {10.1109/MS.2015.26}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Vijayasarathy, Butler - 2016 - Choice of Software Development Methodologies Do Organizational, Project, and Team Characteristics Matter.pdf:pdf}, +issn = {07407459}, +journal = {IEEE Software}, +keywords = {agile methodologies,hybrid software development,organizational characteristics,project characteristics,software development,software development methodologies,software engineering,team characteristics,traditional methodologies,waterfall model}, +number = {5}, +pages = {86--94}, +publisher = {IEEE}, +title = {{Choice of Software Development Methodologies: Do Organizational, Project, and Team Characteristics Matter?}}, +volume = {33}, year = {2016} } -@article{Li2018, -abstract = {This is an extended abstract of a paper published in the Empirical Software Engineering journal. The original paper is communicated by Mark Grechanik. The paper empirically studied how developers assign log levels to their logging statements and proposed an automated approach to help developers determine the most appropriate log level when they add a new logging statement. We analyzed the development history of four open source projects (Hadoop, Directory Server, Hama, and Qpid). We found that our automated approach can accurately suggest the levels of logging statements with an AUC of 0.75 to 0.81. We also found that the characteristics of the containing block of a newly-added logging statement, the existing logging statements in the containing source code file, and the content of the newly-added logging statement play important roles in determining the appropriate log level for that logging statement.}, -author = {Li, Heng and Shang, Weiyi and Hassan, Ahmed E.}, -doi = {10.1109/SANER.2018.8330234}, -file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Li, Shang, Hassan - 2018 - Which log level should developers choose for a new logging statement (journal-first abstract).pdf:pdf}, -isbn = {9781538649695}, -journal = {25th IEEE International Conference on Software Analysis, Evolution and Reengineering, SANER 2018 - Proceedings}, -keywords = {log level,ordinal regression model,software logging}, -number = {4}, -pages = {468}, +@article{Al-Saiyd2015, +author = {Al-Saiyd, Nedhal and Zriqat, Esraa}, +file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/EJSR-AnalyzingtheimpactofRequirementChangingonSoftwareDesign.pdf:pdf}, +journal = {European Journal of Scientific Research}, +number = {February}, +title = {{Analyzing the Impact of Requirement Changing on Software Design}}, +volume = {136}, +year = {2015} +} +@article{Cinque2013, +abstract = {Event logs have been widely used over the last three decades to analyze the failure behavior of a variety of systems. Nevertheless, the implementation of the logging mechanism lacks a systematic approach and collected logs are often inaccurate at reporting software failures: This is a threat to the validity of log-based failure analysis. 
This paper analyzes the limitations of current logging mechanisms and proposes a rule-based approach to make logs effective for analyzing software failures. The approach leverages artifacts produced at system design time and puts forth a set of rules to formalize the placement of the logging instructions within the source code. The validity of the approach, with respect to traditional logging mechanisms, is shown by means of around 12,500 software fault injection experiments into real-world systems. {\textcopyright} 2012 IEEE.},
+author = {Cinque, Marcello and Cotroneo, Domenico and Pecchia, Antonio},
+doi = {10.1109/TSE.2012.67},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Event{\_}Logs{\_}for{\_}the{\_}Analysis{\_}of{\_}Software{\_}Failures{\_}A{\_}Rule-Based{\_}Approach.pdf:pdf},
+issn = {00985589},
+journal = {IEEE Transactions on Software Engineering},
+keywords = {Event log,error detection,logging mechanism,rule-based logging,software failures},
+number = {6},
+pages = {806--821},
+publisher = {IEEE},
+title = {{Event logs for the analysis of software failures: A rule-based approach}},
+volume = {39},
+year = {2013}
}
+@article{Rong2018a,
+abstract = {Background: Logs are the footprints that software systems produce during runtime, which can be used to understand the dynamic behavior of these software systems. To generate logs, logging practice is accepted by developers to place logging statements in the source code of software systems. Compared to the great number of studies on log analysis, the research on logging practice is relatively scarce, which raises a very critical question, i.e. as the original intention, can current logging practice support capturing the behavior of software systems effectively? Aims: To answer this question, we first need to understand how logging practices are implemented in these software projects. Method: In this paper, we carried out an empirical study to explore the logging practice in open source software projects so as to establish a basic understanding on how logging practice is applied in real-world software projects. The density, log level (what to log?) and context (where to log?) are measured for our study. Results: Based on the evidence we collected in 28 top open source projects, we find the logging practice is adopted highly inconsistently among different developers, both across projects and even within one project, in terms of the density and log levels of logging statements. However, the choice of what context to place the logging statements in is consistent to a fair degree. Conclusion: Both the inconsistency in density and log level and the convergence of context have forced us to question whether it is a reliable means to understand the runtime behavior of software systems via analyzing the logs produced by the current logging practice.},
+author = {Rong, Guoping and Gu, Shenghui and Zhang, He and Shao, Dong and Liu, Wanggen},
+doi = {10.1109/ASWEC.2018.00031},
+file = {:C$\backslash$:/WEB/SOURCE/Masters/docs/research/Rong et al. 
- 2018 - How is logging practice implemented in open source software projects A preliminary exploration.pdf:pdf}, +isbn = {9781728112411}, +journal = {Proceedings - 25th Australasian Software Engineering Conference, ASWEC 2018}, +keywords = {Empirical study,Java-based,Log,Logging practice}, +pages = {171--180}, +publisher = {IEEE}, +title = {{How is logging practice implemented in open source software projects? A preliminary exploration}}, year = {2018} }