/* Generic infrastructure to implement various diff algorithms. */
/*
 * Copyright (c) 2020 Neels Hofmeyr <neels@hofmeyr.de>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* A range of indexes; presumably half-open, [start, end) — confirm against
 * callers that populate it. */
struct diff_range {
	int start;
	int end;
};
/* List of all possible return codes of a diff invocation. */
#define DIFF_RC_USE_DIFF_ALGO_FALLBACK	-1
#define DIFF_RC_OK			0
/* Any positive return values are errno values from sys/errno.h */
struct diff_atom;

/* For each file, there is a "root" struct diff_data referencing the entire
 * file, which the atoms are parsed from. In recursion of diff algorithm, there
 * may be "child" struct diff_data only referencing a subsection of the file,
 * re-using the atoms parsing. For "root" structs, atoms_allocated will be
 * nonzero, indicating that the array of atoms is owned by that struct. For
 * "child" structs, atoms_allocated == 0, to indicate that the struct is
 * referencing a subset of atoms. */
struct diff_data {
	FILE *f;		/* if root diff_data and not memory-mapped */
	off_t pos;		/* if not memory-mapped */
	const uint8_t *data;	/* if memory-mapped */
	off_t len;

	ARRAYLIST(struct diff_atom) atoms;
	struct diff_data *root;		/* the "root" struct for this file */
	struct diff_data *current;	/* NOTE(review): purpose not visible
					 * here — confirm in diff_main.c */
	void *algo_data;		/* scratch space owned by the algo */

	int diff_flags;			/* bitmask of DIFF_FLAG_* below */

	int err;			/* sticky error, an errno value */
};
/* Values for diff_data.diff_flags and the diff_flags arg of diff_main(). */
#define DIFF_FLAG_IGNORE_WHITESPACE	0x00000001
#define DIFF_FLAG_SHOW_PROTOTYPES	0x00000002

/* Release the resources owned by diff_data; per the ownership note above,
 * only "root" structs own their atoms array. */
void diff_data_free(struct diff_data *diff_data);
struct diff_chunk;
typedef ARRAYLIST(struct diff_chunk) diff_chunk_arraylist_t;

/* The outcome of one diff invocation; obtained from diff_main() and released
 * with diff_result_free(). */
struct diff_result {
	int rc;			/* DIFF_RC_OK, or a positive errno value */
	struct diff_data left;
	struct diff_data right;
	diff_chunk_arraylist_t chunks;
};
struct diff_state;

/* Signature of a utility function to divide both source files into diff atoms.
 * It is possible that a (future) algorithm requires both source files to decide
 * on atom split points, hence this gets both left and right to atomize at the
 * same time.
 * An example is diff_atomize_text_by_line() in diff_atomize_text.c.
 *
 * func_data: context pointer (free to be used by implementation).
 * left: struct diff_data with left->data and left->len already set up, and
 *       left->atoms to be created.
 * right: struct diff_data with right->data and right->len already set up, and
 *        right->atoms to be created.
 */
typedef int (*diff_atomize_func_t)(void *func_data,
				   struct diff_data *left,
				   struct diff_data *right);

/* Stock atomizer: split both files into one atom per line of text. */
extern int diff_atomize_text_by_line(void *func_data,
				     struct diff_data *left,
				     struct diff_data *right);
struct diff_algo_config;

/* Signature of a diff algorithm implementation; see the DIFF_RC_* codes above
 * for the return value convention. */
typedef int (*diff_algo_impl_t)(
	const struct diff_algo_config *algo_config, struct diff_state *state);

/* Form a result with all left-side removed and all right-side added, i.e. no
 * actual diff algorithm involved. */
int diff_algo_none(const struct diff_algo_config *algo_config,
		   struct diff_state *state);

/* Myers Diff tracing from the start all the way through to the end, requiring
 * quadratic amounts of memory. This can fail if the required space surpasses
 * algo_config->permitted_state_size. */
extern int diff_algo_myers(const struct diff_algo_config *algo_config,
			   struct diff_state *state);

/* Myers "Divide et Impera": tracing forwards from the start and backwards from
 * the end to find a midpoint that divides the problem into smaller chunks.
 * Requires only linear amounts of memory. */
extern int diff_algo_myers_divide(
	const struct diff_algo_config *algo_config, struct diff_state *state);

/* Patience Diff algorithm, which divides a larger diff into smaller chunks. For
 * very specific scenarios, it may lead to a complete diff result by itself, but
 * needs a fallback algo to solve chunks that don't have common-unique atoms. */
extern int diff_algo_patience(
	const struct diff_algo_config *algo_config, struct diff_state *state);
/* Diff algorithms to use, possibly nested. For example:
 *
 * struct diff_algo_config myers, patience, myers_divide;
 *
 * myers = (struct diff_algo_config){
 *	.impl = diff_algo_myers,
 *	.permitted_state_size = 32 * 1024 * 1024,
 *	// When too large, do diff_algo_patience:
 *	.fallback_algo = &patience,
 * };
 *
 * const struct diff_algo_config patience = (struct diff_algo_config){
 *	.impl = diff_algo_patience,
 *	// After subdivision, do Patience again:
 *	.inner_algo = &patience,
 *	// If subdivision failed, do Myers Divide et Impera:
 *	.fallback_algo = &myers_divide,
 * };
 *
 * const struct diff_algo_config myers_divide = (struct diff_algo_config){
 *	.impl = diff_algo_myers_divide,
 *	// When division succeeded, start from the top:
 *	.inner_algo = &myers,
 *	// (fallback_algo = NULL implies diff_algo_none).
 * };
 *
 * struct diff_config config = {
 *	.algo = &myers,
 *	...
 * };
 * diff_main(&config, ...);
 */
struct diff_algo_config {
	diff_algo_impl_t impl;

	/* Fail this algo if it would use more than this amount of memory, and
	 * instead use fallback_algo (diff_algo_myers). permitted_state_size ==
	 * 0 means no limitation. */
	size_t permitted_state_size;

	/* For algorithms that divide into smaller chunks, use this algorithm to
	 * solve the divided chunks. */
	const struct diff_algo_config *inner_algo;

	/* If the algorithm fails (e.g. diff_algo_myers_if_small needs too large
	 * state, or diff_algo_patience can't find any common-unique atoms),
	 * then use this algorithm instead. */
	const struct diff_algo_config *fallback_algo;
};
/* Top-level configuration passed to diff_main(). */
struct diff_config {
	/* How to split the input files into atoms; see diff_atomize_func_t. */
	diff_atomize_func_t atomize_func;
	void *atomize_func_data;	/* opaque context for atomize_func */

	/* (Possibly nested) algorithm selection; see struct diff_algo_config. */
	const struct diff_algo_config *algo;

	/* How deep to step into subdivisions of a source file, a paranoia /
	 * safety measure to guard against infinite loops through diff
	 * algorithms. When the maximum recursion is reached, employ
	 * diff_algo_none (i.e. remove all left atoms and add all right atoms).
	 */
	unsigned int max_recursion_depth;
};
/* Run the configured diff on two inputs. Each side is given either as an open
 * FILE (left_f/right_f) or as an in-memory buffer (left_data/right_data with
 * its length) — see the struct diff_data field comments above for which of
 * f vs. data is used. diff_flags is a bitmask of DIFF_FLAG_* values.
 * The caller owns the returned result and releases it with
 * diff_result_free(); check result->rc before using result->chunks. */
struct diff_result *diff_main(const struct diff_config *config,
			      FILE *left_f, const uint8_t *left_data,
			      off_t left_len,
			      FILE *right_f, const uint8_t *right_data,
			      off_t right_len, int diff_flags);

/* Free a result obtained from diff_main(). */
void diff_result_free(struct diff_result *result);