/* Generic infrastructure to implement various diff algorithms. */
/*
 * Copyright (c) 2020 Neels Hofmeyr <neels@hofmeyr.de>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#pragma once

#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>

#include <diff/arraylist.h>
#ifndef MAX
/* NOTE: evaluates both arguments twice -- do not pass expressions with side effects. */
#define MAX(A,B) ((A)>(B)?(A):(B))
#endif
#ifndef MIN
#define MIN(A,B) ((A)<(B)?(A):(B))
#endif

/* A half-open index range [start, end). */
struct range {
	int start;
	int end;
};

/* True if the range spans no items at all. */
static inline bool
range_empty(const struct range *r)
{
	return r->start == r->end;
}

/* True if a and b overlap or are directly adjacent, i.e. they could be
 * merged into one contiguous range. */
static inline bool
ranges_touch(const struct range *a, const struct range *b)
{
	return (a->end >= b->start) && (a->start <= b->end);
}

/* Grow a in place to the smallest range containing both a and b. */
static inline void
ranges_merge(struct range *a, const struct range *b)
{
	*a = (struct range){
		.start = MIN(a->start, b->start),
		.end = MAX(a->end, b->end),
	};
}

/* Number of items the range spans; a NULL range counts as empty. */
static inline int
range_len(const struct range *r)
{
	if (!r)
		return 0;
	return r->end - r->start;
}

/* List of all possible return codes of a diff invocation. */
enum diff_rc {
	DIFF_RC_USE_DIFF_ALGO_FALLBACK = -1,
	DIFF_RC_OK = 0,
	DIFF_RC_ENOTSUP = ENOTSUP,
	DIFF_RC_ENOMEM = ENOMEM,
	DIFF_RC_EINVAL = EINVAL,
};

/* One unit of comparison, e.g. one line of text (see diff_atomize_text_by_line()). */
struct diff_atom {
	const uint8_t *at;	/* Start of this atom's bytes within the source data (not owned). */
	size_t len;		/* Number of bytes at 'at'. */

	/* This hash is just a very cheap speed up for finding *mismatching* atoms. When hashes match, we still need to
	 * compare entire atoms to find out whether they are indeed identical or not. */
	unsigned int hash;

	/* State for the Patience Diff algorithm */
	/* TODO: keep a separate array for the patience state */
	struct {
		bool unique_here;
		bool unique_in_both;
		struct diff_atom *pos_in_other;
		struct diff_atom *prev_stack;
		struct range identical_lines;
	} patience;
};

/* Compare two atoms for content equality: cheap hash and length checks first,
 * full byte comparison only when both of those match. */
static inline bool
diff_atom_same(const struct diff_atom *left, const struct diff_atom *right)
{
	return left->hash == right->hash
		&& left->len == right->len
		&& memcmp(left->at, right->at, left->len) == 0;
}
/* For each file, there is a "root" struct diff_data referencing the entire file, which the atoms are parsed from. In
 * recursion of diff algorithm, there may be "child" struct diff_data only referencing a subsection of the file,
 * re-using the atoms parsing. For "root" structs, atoms_allocated will be nonzero, indicating that the array of atoms
 * is owned by that struct. For "child" structs, atoms_allocated == 0, to indicate that the struct is referencing a
 * subset of atoms.
 * NOTE(review): "atoms_allocated" presumably refers to the allocation counter inside the ARRAYLIST
 * (see diff/arraylist.h) -- confirm there. */
struct diff_data {
	const uint8_t *data;	/* Raw source bytes (not owned by this struct). */
	size_t len;		/* Number of bytes at data. */
	ARRAYLIST(struct diff_atom) atoms;	/* Atoms parsed from data. */
	struct diff_data *root;	/* The owning "root" diff_data this (possibly child) struct derives from. */
};

/* Release resources held by diff_data (the atoms array for "root" structs; see comment above). */
void diff_data_free(struct diff_data *diff_data);
/* The atom's index in the entire file. For atoms divided by lines of text, this yields the line number (starting with
 * 0). Also works for diff_data that reference only a subsection of a file, always reflecting the global position in
 * the file (and not the relative position within the subsection).
 * Evaluates to root->atoms.len (one past the last valid index) when ATOM is NULL or lies before the atoms array. */
#define diff_atom_root_idx(DIFF_DATA, ATOM) \
	((ATOM) && ((ATOM) >= (DIFF_DATA)->root->atoms.head) \
	 ? (unsigned int)((ATOM) - ((DIFF_DATA)->root->atoms.head)) \
	 : (DIFF_DATA)->root->atoms.len)

/* The atom's index within DIFF_DATA. For atoms divided by lines of text, this yields the line number (starting with
 * 0).
 * Evaluates to atoms.len (one past the last valid index) when ATOM is NULL or lies before the atoms array. */
#define diff_atom_idx(DIFF_DATA, ATOM) \
	((ATOM) && ((ATOM) >= (DIFF_DATA)->atoms.head) \
	 ? (unsigned int)((ATOM) - ((DIFF_DATA)->atoms.head)) \
	 : (DIFF_DATA)->atoms.len)

/* Iterate ATOM over the COUNT atoms starting at FIRST_ATOM; loops zero times if FIRST_ATOM is NULL. */
#define foreach_diff_atom(ATOM, FIRST_ATOM, COUNT) \
	for ((ATOM) = (FIRST_ATOM); \
	     (ATOM) && ((ATOM) >= (FIRST_ATOM)) && ((ATOM) - (FIRST_ATOM) < (COUNT)); \
	     (ATOM)++)

/* Iterate ATOM over all atoms of DIFF_DATA. */
#define diff_data_foreach_atom(ATOM, DIFF_DATA) \
	foreach_diff_atom(ATOM, (DIFF_DATA)->atoms.head, (DIFF_DATA)->atoms.len)

/* Iterate ATOM over the atoms of DIFF_DATA, starting at atom FROM (which must lie within DIFF_DATA's atoms). */
#define diff_data_foreach_atom_from(FROM, ATOM, DIFF_DATA) \
	for ((ATOM) = (FROM); \
	     (ATOM) && ((ATOM) >= (DIFF_DATA)->atoms.head) && ((ATOM) - (DIFF_DATA)->atoms.head < (DIFF_DATA)->atoms.len); \
	     (ATOM)++)
/* A diff chunk represents a set of atoms on the left and/or a set of atoms on the right.
 *
 * If solved == false:
 * The diff algorithm has divided the source file, and this is a chunk that the inner_algo should run on next.
 * The lines on the left should be diffed against the lines on the right.
 * (If there are no left lines or no right lines, it implies solved == true, because there is nothing to diff.)
 *
 * If solved == true:
 * If there are only left atoms, it is a chunk removing atoms from the left ("a minus chunk").
 * If there are only right atoms, it is a chunk adding atoms from the right ("a plus chunk").
 * If there are both left and right lines, it is a chunk of equal content on both sides,
 * and left_count == right_count:
 *
 * - foo }
 * - bar }-- diff_chunk{ left_start = &left.atoms.head[0], left_count = 3,
 * - baz }              right_start = NULL, right_count = 0 }
 *   moo }
 *   goo }-- diff_chunk{ left_start = &left.atoms.head[3], left_count = 3,
 *   zoo }              right_start = &right.atoms.head[0], right_count = 3 }
 * + loo }
 * + roo }-- diff_chunk{ left_start = NULL, left_count = 0,
 * + too }              right_start = &right.atoms.head[3], right_count = 3 }
 *
 */
struct diff_chunk {
	bool solved;	/* False while this chunk still needs to be diffed by an inner algorithm; see above. */
	struct diff_atom *left_start;	/* First left-side atom, or NULL for a pure "plus" chunk. */
	unsigned int left_count;	/* Number of atoms starting at left_start. */
	struct diff_atom *right_start;	/* First right-side atom, or NULL for a pure "minus" chunk. */
	unsigned int right_count;	/* Number of atoms starting at right_start. */
};
typedef ARRAYLIST(struct diff_chunk) diff_chunk_arraylist_t;
/* Allocation granularity for growing a chunk arraylist. */
#define DIFF_RESULT_ALLOC_BLOCKSIZE 128

/* The overall outcome of a diff run, as handed back to the caller of diff_main(). */
struct diff_result {
	enum diff_rc rc;	/* DIFF_RC_OK on success, else one of the other enum diff_rc codes. */
	struct diff_data left;	/* Root diff_data for the left source. */
	struct diff_data right;	/* Root diff_data for the right source. */
	diff_chunk_arraylist_t chunks;	/* The resulting diff chunks. */
};
/* Mutable working state threaded through one pass of a diff algorithm. */
struct diff_state {
	/* The final result passed to the original diff caller. */
	struct diff_result *result;

	/* The root diff_data is in result->left,right, these are (possibly) subsections of the root data. */
	struct diff_data left;
	struct diff_data right;

	/* Remaining recursion budget; see diff_config.max_recursion_depth. */
	unsigned int recursion_depth_left;

	/* Remaining chunks from one diff algorithm pass, if any solved == false chunks came up. */
	diff_chunk_arraylist_t temp_result;
};
/* Record a new chunk (see struct diff_chunk above) in the diff state.
 * NOTE(review): returns a pointer to the stored chunk, presumably NULL on allocation failure -- confirm in
 * the implementation. */
struct diff_chunk *diff_state_add_chunk(struct diff_state *state, bool solved,
					struct diff_atom *left_start, unsigned int left_count,
					struct diff_atom *right_start, unsigned int right_count);

/* Signature of a utility function to divide both source files into diff atoms.
 * It is possible that a (future) algorithm requires both source files to decide on atom split points, hence this gets
 * both left and right to atomize at the same time.
 * An example is diff_atomize_text_by_line() in diff_atomize_text.c.
 *
 * func_data: context pointer (free to be used by implementation).
 * left: struct diff_data with left->data and left->len already set up, and left->atoms to be created.
 * right: struct diff_data with right->data and right->len already set up, and right->atoms to be created.
 */
typedef enum diff_rc (*diff_atomize_func_t)(void *func_data, struct diff_data *left, struct diff_data *right);

/* Atomizer that splits each source into lines of text (defined in diff_atomize_text.c). */
extern enum diff_rc diff_atomize_text_by_line(void *func_data, struct diff_data *left, struct diff_data *right);
struct diff_algo_config;
/* Signature of a diff algorithm implementation; the diff_algo_* functions below all match it. */
typedef enum diff_rc (*diff_algo_impl_t)(const struct diff_algo_config *algo_config, struct diff_state *state);

/* Form a result with all left-side removed and all right-side added, i.e. no actual diff algorithm involved. */
enum diff_rc diff_algo_none(const struct diff_algo_config *algo_config, struct diff_state *state);

/* Myers Diff tracing from the start all the way through to the end, requiring quadratic amounts of memory. This can
 * fail if the required space surpasses algo_config->permitted_state_size. */
extern enum diff_rc diff_algo_myers(const struct diff_algo_config *algo_config, struct diff_state *state);

/* Myers "Divide et Impera": tracing forwards from the start and backwards from the end to find a midpoint that divides
 * the problem into smaller chunks. Requires only linear amounts of memory. */
extern enum diff_rc diff_algo_myers_divide(const struct diff_algo_config *algo_config, struct diff_state *state);

/* Patience Diff algorithm, which divides a larger diff into smaller chunks. For very specific scenarios, it may lead to
 * a complete diff result by itself, but needs a fallback algo to solve chunks that don't have common-unique atoms. */
extern enum diff_rc diff_algo_patience(const struct diff_algo_config *algo_config, struct diff_state *state);
/* Diff algorithms to use, possibly nested. For example:
 *
 * struct diff_algo_config myers, patience, myers_divide;
 *
 * myers = (struct diff_algo_config){
 *	.impl = diff_algo_myers,
 *	.permitted_state_size = 32 * 1024 * 1024,
 *	.fallback_algo = &patience, // when too large, do diff_algo_patience
 * };
 *
 * patience = (struct diff_algo_config){
 *	.impl = diff_algo_patience,
 *	.inner_algo = &patience,	// After subdivision, do Patience again.
 *	.fallback_algo = &myers_divide,	// If subdivision failed, do Myers Divide et Impera.
 * };
 *
 * myers_divide = (struct diff_algo_config){
 *	.impl = diff_algo_myers_divide,
 *	.inner_algo = &myers,	// When division succeeded, start from the top.
 *				// (fallback_algo = NULL implies diff_algo_none).
 * };
 *
 * struct diff_config config = {
 *	.algo = &myers,
 *	...
 * };
 * diff_main(&config, ...);
 */
struct diff_algo_config {
	diff_algo_impl_t impl;	/* The algorithm entry point; one of the diff_algo_* functions above. */

	/* Fail this algo if it would use more than this amount of memory, and instead use fallback_algo
	 * (diff_algo_myers). permitted_state_size == 0 means no limitation. */
	size_t permitted_state_size;

	/* For algorithms that divide into smaller chunks, use this algorithm to solve the divided chunks. */
	const struct diff_algo_config *inner_algo;

	/* If the algorithm fails (e.g. diff_algo_myers_if_small needs too large state, or diff_algo_patience can't find
	 * any common-unique atoms), then use this algorithm instead. */
	const struct diff_algo_config *fallback_algo;
};
/* Top-level configuration handed to diff_main(): how to atomize and which algorithm(s) to run. */
struct diff_config {
	diff_atomize_func_t atomize_func;	/* Splits the sources into atoms, e.g. diff_atomize_text_by_line. */
	void *atomize_func_data;	/* Opaque context passed through to atomize_func. */

	const struct diff_algo_config *algo;	/* The (possibly nested) algorithm configuration; see example above. */

	/* How deep to step into subdivisions of a source file, a paranoia / safety measure to guard against infinite
	 * loops through diff algorithms. When the maximum recursion is reached, employ diff_algo_none (i.e. remove all
	 * left atoms and add all right atoms). */
	unsigned int max_recursion_depth;
};
/* Run the configured diff on the two given byte buffers and return the outcome.
 * NOTE(review): the returned result is presumably heap-allocated and owned by the caller, to be released with
 * diff_result_free() -- confirm in the implementation. Check result->rc before using the chunks. */
struct diff_result *diff_main(const struct diff_config *config,
			      const uint8_t *left_data, size_t left_len,
			      const uint8_t *right_data, size_t right_len);
/* Release a result obtained from diff_main(). */
void diff_result_free(struct diff_result *result);