@book{dbce83a897ac416d9b3a3fd1f762fe9d,
title = "Analysing Ferret XML reports to estimate the density of copied code",
abstract = "This document explains a method for identifying dense blocks of copied text in pairs of files. The files are compared using Ferret, a copy-detection tool which computes a similarity score based on trigrams. This similarity score cannot determine the arrangement of copied text within a file; two files with the same similarity to another file may have different distributions of matched trigrams. For example, in one file the matched trigrams may form a single large block, while in the other they are scattered throughout. However, Ferret produces an XML report which relates matched and unmatched trigrams back to the original text. This report can be analysed to find identical or densely copied blocks in the files. We address the problems of defining and locating these blocks, and of representing the blocks found as a meaningful feature vector, regardless of copy pattern. We provide a step-by-step example to explain our method for finding dense blocks. A set of artificial files, built to mimic different copy patterns, is used to explore a set of features which profile the dense blocks in a file. A range of density parameters is used to construct features which show that the copy patterns in the artificial files can be separated.",
keywords = "density analysis, code duplication, Ferret",
author = "Pamela Green and Peter Lane and Austen Rainer and Sven-Bodo Scholz",
year = "2010",
language = "English",
series = "UH Computer Science Technical Report",
publisher = "University of Hertfordshire",
}