/* Generic infrastructure to implement various diff algorithms. */
/*
 * Copyright (c) 2020 Neels Hofmeyr <neels@hofmeyr.de>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#pragma once

#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>

#include <diff/arraylist.h>

/* List of all possible return codes of a diff invocation. */
enum diff_rc {
        DIFF_RC_USE_DIFF_ALGO_FALLBACK = -1,
        DIFF_RC_OK = 0,
        DIFF_RC_ENOTSUP = ENOTSUP,
        DIFF_RC_ENOMEM = ENOMEM,
        DIFF_RC_EINVAL = EINVAL,
};

struct diff_atom {
        const uint8_t *at;
        size_t len;

        /* This hash is just a very cheap speed up for finding *mismatching* atoms. When hashes match, we still need to
         * compare entire atoms to find out whether they are indeed identical or not. */
        unsigned int hash;

        /* State for the Patience Diff algorithm */
        /* TODO: keep a separate array for the patience state */
        struct {
                bool unique_here;
                bool unique_in_both;
                struct diff_atom *pos_in_other;
                struct diff_atom *prev_stack;
        } patience;
};

static inline bool diff_atom_same(const struct diff_atom *left, const struct diff_atom *right)
{
        return left->hash == right->hash
                && left->len == right->len
                && memcmp(left->at, right->at, left->len) == 0;
}

/* For each file, there is a "root" struct diff_data referencing the entire file, which the atoms are parsed from. When
 * the diff algorithm recurses, there may be "child" struct diff_data instances referencing only a subsection of the
 * file, re-using the atoms parsed for the root. For "root" structs, atoms_allocated will be nonzero, indicating that
 * the array of atoms is owned by that struct. For "child" structs, atoms_allocated == 0, to indicate that the struct
 * references only a subset of the root's atoms. */
struct diff_data {
        const uint8_t *data;
        size_t len;
        ARRAYLIST(struct diff_atom) atoms;
        struct diff_data *root;
};
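
/* Illustrative sketch (not part of the API): a "child" diff_data covering a sub-range of a root's atoms shares the
 * root's atom array rather than owning a copy, conceptually along the lines of:
 *
 *      struct diff_data child = { .root = root };
 *      child.atoms.head = &root->atoms.head[start];    // points into the root's atom array
 *      child.atoms.len = count;                        // atoms_allocated stays 0: the child does not own the array
 *      child.data = child.atoms.head[0].at;
 *      child.len = <number of bytes spanned by those atoms>;
 *
 * The names 'start' and 'count' above are hypothetical; the actual subsetting is done inside the library's diff
 * algorithms. */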

void diff_data_free(struct diff_data *diff_data);

/* The atom's index in the entire file. For atoms divided by lines of text, this yields the line number (starting with
 * 0). Also works for diff_data that reference only a subsection of a file, always reflecting the global position in
 * the file (and not the relative position within the subsection). */
#define diff_atom_root_idx(DIFF_DATA, ATOM) \
        ((ATOM) ? (ATOM) - ((DIFF_DATA)->root->atoms.head) : (DIFF_DATA)->root->atoms.len)

/* The atom's index within DIFF_DATA. For atoms divided by lines of text, this yields the line number (starting with
 * 0). */
#define diff_atom_idx(DIFF_DATA, ATOM) \
        ((ATOM) ? (ATOM) - ((DIFF_DATA)->atoms.head) : (DIFF_DATA)->atoms.len)
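
/* Example (illustrative only): for an atom inside a "child" diff_data d, diff_atom_idx() gives the position relative
 * to d, while diff_atom_root_idx() gives the position in the whole file:
 *
 *      struct diff_atom *atom = &d.atoms.head[2];
 *      unsigned int local_idx = diff_atom_idx(&d, atom);        // 2
 *      unsigned int file_idx = diff_atom_root_idx(&d, atom);    // 2 + offset of d within d.root
 */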

#define foreach_diff_atom(ATOM, FIRST_ATOM, COUNT) \
        for ((ATOM) = (FIRST_ATOM); \
             (ATOM) && ((ATOM) >= (FIRST_ATOM)) && ((ATOM) - (FIRST_ATOM) < (COUNT)); \
             (ATOM)++)

#define diff_data_foreach_atom(ATOM, DIFF_DATA) \
        foreach_diff_atom(ATOM, (DIFF_DATA)->atoms.head, (DIFF_DATA)->atoms.len)

#define diff_data_foreach_atom_from(FROM, ATOM, DIFF_DATA) \
        for ((ATOM) = (FROM); \
             (ATOM) && ((ATOM) >= (DIFF_DATA)->atoms.head) && ((ATOM) - (DIFF_DATA)->atoms.head < (DIFF_DATA)->atoms.len); \
             (ATOM)++)
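
/* Example (illustrative only): assuming atoms produced by diff_atomize_text_by_line(), print each atom of a
 * struct diff_data d prefixed with its index:
 *
 *      struct diff_atom *atom;
 *      diff_data_foreach_atom(atom, &d)
 *              printf("%u: %.*s\n", (unsigned int)diff_atom_idx(&d, atom), (int)atom->len, atom->at);
 *
 * (Whether an atom's bytes include the trailing newline depends on the atomizer, so the extra "\n" here may produce
 * blank lines.) */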

/* A diff chunk represents a set of atoms on the left and/or a set of atoms on the right.
 *
 * If solved == false:
 * The diff algorithm has divided the source file, and this is a chunk that the inner_algo should run on next.
 * The lines on the left should be diffed against the lines on the right.
 * (If there are no left lines or no right lines, it implies solved == true, because there is nothing to diff.)
 *
 * If solved == true:
 * If there are only left atoms, it is a chunk removing atoms from the left ("a minus chunk").
 * If there are only right atoms, it is a chunk adding atoms from the right ("a plus chunk").
 * If there are both left and right lines, it is a chunk of equal content on both sides,
 * and left_count == right_count:
 *
 * - foo   }
 * - bar   }-- diff_chunk{ left_start = &left.atoms.head[0], left_count = 3,
 * - baz   }              right_start = NULL, right_count = 0 }
 *   moo   }
 *   goo   }-- diff_chunk{ left_start = &left.atoms.head[3], left_count = 3,
 *   zoo   }              right_start = &right.atoms.head[0], right_count = 3 }
 * +loo    }
 * +roo    }-- diff_chunk{ left_start = NULL, left_count = 0,
 * +too    }              right_start = &right.atoms.head[3], right_count = 3 }
 *
 */
struct diff_chunk {
        bool solved;
        struct diff_atom *left_start;
        unsigned int left_count;
        struct diff_atom *right_start;
        unsigned int right_count;
};
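
/* Example (illustrative only): classifying a solved chunk when walking a diff result:
 *
 *      struct diff_chunk *c = ...;
 *      if (c->left_count && !c->right_count)
 *              ...;    // minus chunk: left_count atoms removed from the left
 *      else if (!c->left_count && c->right_count)
 *              ...;    // plus chunk: right_count atoms added from the right
 *      else
 *              ...;    // equal content on both sides, left_count == right_count
 */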

typedef ARRAYLIST(struct diff_chunk) diff_chunk_arraylist_t;
#define DIFF_RESULT_ALLOC_BLOCKSIZE 128

struct diff_result {
        enum diff_rc rc;
        struct diff_data left;
        struct diff_data right;
        diff_chunk_arraylist_t chunks;
};

struct diff_state {
        /* The final result passed to the original diff caller. */
        struct diff_result *result;

        /* The root diff_data is in result->left and result->right; these here are (possibly) subsections of that root
         * data. */
        struct diff_data left;
        struct diff_data right;

        unsigned int recursion_depth_left;

        /* Remaining chunks from one diff algorithm pass, if any solved == false chunks came up. */
        diff_chunk_arraylist_t temp_result;
};

struct diff_chunk *diff_state_add_chunk(struct diff_state *state, bool solved,
                                        struct diff_atom *left_start, unsigned int left_count,
                                        struct diff_atom *right_start, unsigned int right_count);
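
/* Example (illustrative only): an algorithm implementation that determined the first n atoms to be identical on both
 * sides could record them as a solved chunk of equal content (assuming, for this sketch, that a NULL return indicates
 * an allocation failure):
 *
 *      if (!diff_state_add_chunk(state, true,
 *                                state->left.atoms.head, n,
 *                                state->right.atoms.head, n))
 *              return DIFF_RC_ENOMEM;
 */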

/* Signature of a utility function to divide both source files into diff atoms.
 * It is possible that a (future) algorithm requires both source files to decide on atom split points, hence this
 * receives both left and right to atomize at the same time.
 * An example is diff_atomize_text_by_line() in diff_atomize_text.c.
 *
 * func_data: context pointer (free to be used by the implementation).
 * left: struct diff_data with left->data and left->len already set up, and left->atoms to be created.
 * right: struct diff_data with right->data and right->len already set up, and right->atoms to be created.
 */
typedef enum diff_rc (*diff_atomize_func_t)(void *func_data, struct diff_data *left, struct diff_data *right);

extern enum diff_rc diff_atomize_text_by_line(void *func_data, struct diff_data *left, struct diff_data *right);
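
/* Sketch of a custom atomizer (illustrative only, not part of the library): it must fill in left->atoms and
 * right->atoms, setting each atom's at, len and hash. The add_atom() helper below is hypothetical and stands in for
 * whatever ARRAYLIST handling the implementation uses.
 *
 *      enum diff_rc my_atomize_bytes(void *func_data, struct diff_data *left, struct diff_data *right)
 *      {
 *              for (size_t i = 0; i < left->len; i++)
 *                      if (!add_atom(left, &left->data[i], 1))         // one atom per byte
 *                              return DIFF_RC_ENOMEM;
 *              for (size_t i = 0; i < right->len; i++)
 *                      if (!add_atom(right, &right->data[i], 1))
 *                              return DIFF_RC_ENOMEM;
 *              return DIFF_RC_OK;
 *      }
 */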

struct diff_algo_config;
typedef enum diff_rc (*diff_algo_impl_t)(const struct diff_algo_config *algo_config, struct diff_state *state);

/* Form a result with all left-side removed and all right-side added, i.e. no actual diff algorithm involved. */
enum diff_rc diff_algo_none(const struct diff_algo_config *algo_config, struct diff_state *state);

/* Myers Diff tracing from the start all the way through to the end, requiring quadratic amounts of memory. This can
 * fail if the required space surpasses algo_config->permitted_state_size. */
extern enum diff_rc diff_algo_myers(const struct diff_algo_config *algo_config, struct diff_state *state);

/* Myers "Divide et Impera": tracing forwards from the start and backwards from the end to find a midpoint that divides
 * the problem into smaller chunks. Requires only linear amounts of memory. */
extern enum diff_rc diff_algo_myers_divide(const struct diff_algo_config *algo_config, struct diff_state *state);

/* Patience Diff algorithm, which divides a larger diff into smaller chunks. In very specific scenarios, it may lead to
 * a complete diff result by itself, but it needs a fallback algorithm to solve chunks that have no common-unique
 * atoms. */
extern enum diff_rc diff_algo_patience(const struct diff_algo_config *algo_config, struct diff_state *state);

/* Diff algorithms to use, possibly nested. For example:
 *
 * struct diff_algo_config myers, patience, myers_divide;
 *
 * myers = (struct diff_algo_config){
 *         .impl = diff_algo_myers,
 *         .permitted_state_size = 32 * 1024 * 1024,
 *         .fallback_algo = &patience, // when too large, do diff_algo_patience
 * };
 *
 * patience = (struct diff_algo_config){
 *         .impl = diff_algo_patience,
 *         .inner_algo = &patience,        // After subdivision, do Patience again.
 *         .fallback_algo = &myers_divide, // If subdivision failed, do Myers Divide et Impera.
 * };
 *
 * myers_divide = (struct diff_algo_config){
 *         .impl = diff_algo_myers_divide,
 *         .inner_algo = &myers, // When division succeeded, start from the top.
 *         // (fallback_algo = NULL implies diff_algo_none).
 * };
 *
 * struct diff_config config = {
 *         .algo = &myers,
 *         ...
 * };
 * diff_main(&config, ...);
 */
struct diff_algo_config {
        diff_algo_impl_t impl;

        /* Fail this algo if it would use more than this amount of memory, and instead use fallback_algo (this
         * currently applies to diff_algo_myers). permitted_state_size == 0 means no limitation. */
        size_t permitted_state_size;

        /* For algorithms that divide into smaller chunks, use this algorithm to solve the divided chunks. */
        const struct diff_algo_config *inner_algo;

        /* If the algorithm fails (e.g. diff_algo_myers needs too large a state, or diff_algo_patience can't find any
         * common-unique atoms), then use this algorithm instead. */
        const struct diff_algo_config *fallback_algo;
};

struct diff_config {
        diff_atomize_func_t atomize_func;
        void *atomize_func_data;

        const struct diff_algo_config *algo;

        /* How deep to step into subdivisions of a source file; a paranoia/safety measure to guard against infinite
         * loops through the diff algorithms. When the maximum recursion depth is reached, employ diff_algo_none (i.e.
         * remove all left atoms and add all right atoms). */
        unsigned int max_recursion_depth;
};

struct diff_result *diff_main(const struct diff_config *config,
                              const uint8_t *left_data, size_t left_len,
                              const uint8_t *right_data, size_t right_len);
void diff_result_free(struct diff_result *result);
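
/* Example (illustrative only): diffing two in-memory text buffers line by line, using an algorithm nesting like the
 * myers/patience/myers_divide example shown above; left_buf/left_len and right_buf/right_len are assumed to hold the
 * inputs, and max_recursion_depth is an arbitrary illustrative value.
 *
 *      struct diff_config config = {
 *              .atomize_func = diff_atomize_text_by_line,
 *              .algo = &myers,
 *              .max_recursion_depth = 32,
 *      };
 *      struct diff_result *result = diff_main(&config, left_buf, left_len, right_buf, right_len);
 *      if (!result || result->rc != DIFF_RC_OK)
 *              ... handle the error ...
 *      for (unsigned int i = 0; i < result->chunks.len; i++) {
 *              struct diff_chunk *c = &result->chunks.head[i];
 *              ... emit c as removed, added or unchanged atoms (see struct diff_chunk above) ...
 *      }
 *      diff_result_free(result);
 */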