/* Implementation of the Patience Diff algorithm invented by Bram Cohen:
 * Divide a diff problem into smaller chunks by an LCS (Longest Common
 * Subsequence) of common-unique lines. */
/*
 * Copyright (c) 2020 Neels Hofmeyr <neels@hofmeyr.de>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <assert.h>
#include <inttypes.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#include <arraylist.h>
#include <diff_main.h>

#include "diff_internal.h"
#include "diff_debug.h"

/* Algorithm to find unique lines:
 * 0: stupidly iterate atoms
 * 1: qsort
 * 2: mergesort
 */
#define UNIQUE_STRATEGY 1
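/* Note: strategy 0 compares every atom against every other atom (quadratic in
 * the number of lines) and mainly serves as a reference implementation;
 * strategies 1 and 2 sort pointers to all atoms so that equal lines become
 * adjacent, which needs only O(n log n) comparisons. */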

/* Per-atom state for the Patience Diff algorithm */
struct atom_patience {
#if UNIQUE_STRATEGY == 0
	bool unique_here;
#endif
	bool unique_in_both;
	struct diff_atom *pos_in_other;
	struct diff_atom *prev_stack;
	struct diff_range identical_lines;
};
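/* Field summary (as used below): unique_here is only tracked by strategy 0 and
 * means the atom occurs exactly once on its own side; unique_in_both means it
 * occurs exactly once on each side; pos_in_other points at its counterpart on
 * the other side; prev_stack is the patience-sort back reference used to
 * recover the LCS; identical_lines is the range of identical neighbor lines
 * swallowed around a common-unique line. */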

/* A diff_atom has a backpointer to the root diff_data. That points to the
 * current diff_data, a possibly smaller section of the root. That current
 * diff_data->algo_data is a pointer to an array of struct atom_patience. The
 * atom's index in current diff_data gives the index in the atom_patience array.
 */
#define PATIENCE(ATOM) \
	(((struct atom_patience*)((ATOM)->root->current->algo_data))\
	 [diff_atom_idx((ATOM)->root->current, ATOM)])
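/* Illustrative use: PATIENCE(atom) yields this atom's per-algorithm state, so
 * the code below writes e.g.
 *
 *	PATIENCE(atom).unique_in_both = true;
 *	PATIENCE(atom).pos_in_other = other_atom;
 *
 * where 'other_atom' stands for the matching atom on the other side. */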

#if UNIQUE_STRATEGY == 0

/* Stupid iteration and comparison of all atoms */
static int
diff_atoms_mark_unique(struct diff_data *d, unsigned int *unique_count)
{
	struct diff_atom *i;
	unsigned int count = 0;
	diff_data_foreach_atom(i, d) {
		PATIENCE(i).unique_here = true;
		PATIENCE(i).unique_in_both = true;
		count++;
	}
	diff_data_foreach_atom(i, d) {
		struct diff_atom *j;

		if (!PATIENCE(i).unique_here)
			continue;

		diff_data_foreach_atom_from(i + 1, j, d) {
			bool same;
			int r = diff_atom_same(&same, i, j);
			if (r)
				return r;
			if (!same)
				continue;
			if (PATIENCE(i).unique_here) {
				PATIENCE(i).unique_here = false;
				PATIENCE(i).unique_in_both = false;
				count--;
			}
			PATIENCE(j).unique_here = false;
			PATIENCE(j).unique_in_both = false;
			count--;
		}
	}
	if (unique_count)
		*unique_count = count;
	return 0;
}
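/* The two passes above first presume every atom unique and then unmark both
 * members of each duplicate pair they find, so 'count' ends up as the number
 * of atoms that occur exactly once in 'd'. */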

/* Mark those lines as PATIENCE(atom).unique_in_both = true that appear exactly
 * once in each side. */
static int
diff_atoms_mark_unique_in_both(struct diff_data *left, struct diff_data *right,
			       unsigned int *unique_in_both_count)
{
	/* Derive the final unique_in_both count without needing an explicit
	 * iteration. So this is just some optimization to save one iteration
	 * in the end. */
	unsigned int unique_in_both;
	int r;

	r = diff_atoms_mark_unique(left, &unique_in_both);
	if (r)
		return r;
	r = diff_atoms_mark_unique(right, NULL);
	if (r)
		return r;

	debug("unique_in_both %u\n", unique_in_both);

	struct diff_atom *i;
	diff_data_foreach_atom(i, left) {
		if (!PATIENCE(i).unique_here)
			continue;
		struct diff_atom *j;
		int found_in_b = 0;
		diff_data_foreach_atom(j, right) {
			bool same;
			int r = diff_atom_same(&same, i, j);
			if (r)
				return r;
			if (!same)
				continue;
			if (!PATIENCE(j).unique_here) {
				found_in_b = 2; /* or more */
				break;
			} else {
				found_in_b = 1;
				PATIENCE(j).pos_in_other = i;
				PATIENCE(i).pos_in_other = j;
			}
		}

		if (found_in_b == 0 || found_in_b > 1) {
			PATIENCE(i).unique_in_both = false;
			unique_in_both--;
			debug("unique_in_both %u (%d) ", unique_in_both,
			      found_in_b);
			debug_dump_atom(left, NULL, i);
		}
	}

	/* Still need to unmark right[*]->patience.unique_in_both for atoms
	 * that don't exist in left */
	diff_data_foreach_atom(i, right) {
		if (!PATIENCE(i).unique_here
		    || !PATIENCE(i).unique_in_both)
			continue;
		struct diff_atom *j;
		bool found_in_a = false;
		diff_data_foreach_atom(j, left) {
			bool same;
			int r;
			if (!PATIENCE(j).unique_in_both)
				continue;
			r = diff_atom_same(&same, i, j);
			if (r)
				return r;
			if (!same)
				continue;
			found_in_a = true;
			break;
		}

		if (!found_in_a)
			PATIENCE(i).unique_in_both = false;
	}

	if (unique_in_both_count)
		*unique_in_both_count = unique_in_both;
	return 0;
}

#else /* UNIQUE_STRATEGY != 0 */

/* Use an optimized sorting algorithm (qsort, mergesort) to find unique lines */

int diff_atoms_compar(const void *_a, const void *_b)
{
	const struct diff_atom *a = *(struct diff_atom**)_a;
	const struct diff_atom *b = *(struct diff_atom**)_b;
	int cmp;
	int rc = 0;

	/* If there has been an error (e.g. an I/O error) in a previous compar,
	 * we have no way to abort the sort; just report the rc and stop
	 * comparing. Make sure to catch errors on either side. If atoms are
	 * from more than one diff_data, make sure the error, if any, spreads
	 * to all of them, so we can cut short all future comparisons. */
	if (a->root->err)
		rc = a->root->err;
	if (b->root->err)
		rc = b->root->err;
	if (rc) {
		a->root->err = rc;
		b->root->err = rc;
		/* just return 'equal' to not swap more positions */
		return 0;
	}

	/* Sort by the simplistic hash */
	if (a->hash < b->hash)
		return -1;
	if (a->hash > b->hash)
		return 1;

	/* If hashes are the same, the lines may still differ. Do a full cmp. */
	rc = diff_atom_cmp(&cmp, a, b);

	if (rc) {
		/* Mark the I/O error so that the caller can find out about it.
		 * For the case atoms are from more than one diff_data, mark in
		 * both. */
		a->root->err = rc;
		if (a->root != b->root)
			b->root->err = rc;
		return 0;
	}

	return cmp;
}

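/* Sorting primarily by the cheap hash and breaking ties with a full content
 * compare means that, after sorting, all identical atoms sit next to each
 * other; the relative order of unequal atoms is irrelevant to the code below,
 * which only looks at runs of equal atoms. */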
/* Sort an array of struct diff_atom* in-place. */
static int diff_atoms_sort(struct diff_atom *atoms[],
			   size_t atoms_count)
{
#if UNIQUE_STRATEGY == 1
	qsort(atoms, atoms_count, sizeof(struct diff_atom*), diff_atoms_compar);
#else
	mergesort(atoms, atoms_count, sizeof(struct diff_atom*),
		  diff_atoms_compar);
#endif
	return atoms[0]->root->err;
}

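/* Note: qsort(3) is standard C, while mergesort(3) is a BSD extension. Either
 * ordering works here, since only the grouping of equal atoms matters, not the
 * relative order of unequal ones. */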
static int
diff_atoms_mark_unique_in_both(struct diff_data *left, struct diff_data *right,
			       unsigned int *unique_in_both_count_p)
{
	struct diff_atom *a;
	struct diff_atom *b;
	struct diff_atom **all_atoms;
	unsigned int len = 0;
	unsigned int i;
	unsigned int unique_in_both_count = 0;
	int rc;

	all_atoms = calloc(left->atoms.len + right->atoms.len,
			   sizeof(struct diff_atom *));
	if (all_atoms == NULL)
		return ENOMEM;

	left->err = 0;
	right->err = 0;
	left->root->err = 0;
	right->root->err = 0;
	diff_data_foreach_atom(a, left) {
		all_atoms[len++] = a;
	}
	diff_data_foreach_atom(b, right) {
		all_atoms[len++] = b;
	}

	rc = diff_atoms_sort(all_atoms, len);
	if (rc)
		goto free_and_exit;

	/* Now we have a sorted array of atom pointers. All similar lines are
	 * adjacent. Walk through the array and mark those that are unique on
	 * each side, but exist once in both sources. */
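	/* Illustration: with left = (A, B, B) and right = (A, C), the sorted
	 * array groups both A atoms, both B atoms and the lone C atom. Only
	 * the A pair is marked unique_in_both; B repeats on the left and C has
	 * no counterpart, so their runs fail the "exactly once per side" test
	 * below. */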
	for (i = 0; i < len; i++) {
		bool same;
		unsigned int next_differing_i;
		unsigned int last_identical_i;
		unsigned int j;
		unsigned int count_first_side = 1;
		unsigned int count_other_side = 0;
		a = all_atoms[i];
		debug("a: ");
		debug_dump_atom(a->root, NULL, a);

		/* Do as few diff_atom_cmp() as possible: first walk forward
		 * only using the cheap hash as indicator for differing atoms;
		 * then walk backwards until hitting an identical atom. */
		for (next_differing_i = i + 1; next_differing_i < len;
		     next_differing_i++) {
			b = all_atoms[next_differing_i];
			if (a->hash != b->hash)
				break;
		}
		for (last_identical_i = next_differing_i - 1;
		     last_identical_i > i;
		     last_identical_i--) {
			b = all_atoms[last_identical_i];
			rc = diff_atom_same(&same, a, b);
			if (rc)
				goto free_and_exit;
			if (same)
				break;
		}
		next_differing_i = last_identical_i + 1;

		for (j = i+1; j < next_differing_i; j++) {
			b = all_atoms[j];
			/* A following atom is the same. See on which side the
			 * repetition counts. */
			if (a->root == b->root)
				count_first_side++;
			else
				count_other_side++;
			debug("b: ");
			debug_dump_atom(b->root, NULL, b);
			debug(" count_first_side=%d count_other_side=%d\n",
			      count_first_side, count_other_side);
		}

		/* Counted a section of similar atoms, put the results back to
		 * the atoms. */
		if ((count_first_side == 1)
		    && (count_other_side == 1)) {
			b = all_atoms[i+1];
			PATIENCE(a).unique_in_both = true;
			PATIENCE(a).pos_in_other = b;
			PATIENCE(b).unique_in_both = true;
			PATIENCE(b).pos_in_other = a;
			unique_in_both_count++;
		}

		/* j now points at the first atom after 'a' that is not
		 * identical to 'a'. j is always > i. */
		i = j - 1;
	}

	*unique_in_both_count_p = unique_in_both_count;
	rc = 0;
free_and_exit:
	free(all_atoms);
	return rc;
}
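/* With this strategy the work is dominated by sorting the combined atom
 * array; the scan that follows touches each run of equal atoms only once. */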

#endif /* UNIQUE_STRATEGY != 0 */

static int
diff_atoms_swallow_identical_neighbors(struct diff_data *left,
				       struct diff_data *right,
				       unsigned int *unique_in_both_count)
{
	debug("trivially combine identical lines"
	      " around unique_in_both lines\n");

	unsigned int l_idx;
	unsigned int next_l_idx;
	unsigned int l_min = 0;
	unsigned int r_min = 0;
	for (l_idx = 0; l_idx < left->atoms.len; l_idx = next_l_idx) {
		next_l_idx = l_idx + 1;
		struct diff_atom *l = &left->atoms.head[l_idx];
		struct diff_atom *r;

		if (!PATIENCE(l).unique_in_both)
			continue;

		debug("check identical lines around\n");
		debug(" L "); debug_dump_atom(left, right, l);

		unsigned int r_idx = diff_atom_idx(right, PATIENCE(l).pos_in_other);
		r = &right->atoms.head[r_idx];
		debug(" R "); debug_dump_atom(right, left, r);

		struct diff_range identical_l;
		struct diff_range identical_r;

		/* Swallow upwards.
		 * Each common-unique line swallows identical lines upwards and
		 * downwards.
		 * Will never hit another identical common-unique line above on
		 * the left, because those would already have swallowed this
		 * common-unique line in a previous iteration.
		 */
		for (identical_l.start = l_idx, identical_r.start = r_idx;
		     identical_l.start > l_min && identical_r.start > r_min;
		     identical_l.start--, identical_r.start--) {
			bool same;
			int rc = diff_atom_same(&same,
					&left->atoms.head[identical_l.start - 1],
					&right->atoms.head[identical_r.start - 1]);
			if (rc)
				return rc;
			if (!same)
				break;
		}

		/* Swallow downwards. Join any common-unique lines in a block
		 * of matching L/R lines with this one. */
		for (identical_l.end = l_idx + 1, identical_r.end = r_idx + 1;
		     identical_l.end < left->atoms.len
		     && identical_r.end < right->atoms.len;
		     identical_l.end++, identical_r.end++,
		     next_l_idx++) {
			struct diff_atom *l_end;
			struct diff_atom *r_end;
			bool same;
			int rc = diff_atom_same(&same,
					&left->atoms.head[identical_l.end],
					&right->atoms.head[identical_r.end]);
			if (rc)
				return rc;
			if (!same)
				break;
			l_end = &left->atoms.head[identical_l.end];
			r_end = &right->atoms.head[identical_r.end];
			if (!PATIENCE(l_end).unique_in_both)
				continue;
			/* A unique_in_both atom is joined with a preceding
			 * unique_in_both atom; remove the joined atom from the
			 * listing of unique_in_both atoms. */
			PATIENCE(l_end).unique_in_both = false;
			PATIENCE(r_end).unique_in_both = false;
			(*unique_in_both_count)--;
		}

		PATIENCE(l).identical_lines = identical_l;
		PATIENCE(r).identical_lines = identical_r;

		l_min = identical_l.end;
		r_min = identical_r.end;

		if (!diff_range_empty(&PATIENCE(l).identical_lines)) {
			debug("common-unique line at l=%u r=%u swallowed"
			      " identical lines l=%u-%u r=%u-%u\n",
			      l_idx, r_idx,
			      identical_l.start, identical_l.end,
			      identical_r.start, identical_r.end);
		}
		debug("next_l_idx = %u\n", next_l_idx);
	}
	return 0;
}

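/* Illustration: if left lines 10-12 match right lines 20-22 and left[11] /
 * right[21] is the common-unique pair, the loops above grow the ranges to
 * identical_lines = [10,13) on the left and [20,23) on the right (end index
 * exclusive), and any further common-unique pair inside that block is dropped
 * from the count. */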
/* Binary search to find the stack to put this atom "card" on. */
static int
find_target_stack(struct diff_atom *atom,
		  struct diff_atom **patience_stacks,
		  unsigned int patience_stacks_count)
{
	unsigned int lo = 0;
	unsigned int hi = patience_stacks_count;
	while (lo < hi) {
		unsigned int mid = (lo + hi) >> 1;

		if (PATIENCE(patience_stacks[mid]).pos_in_other
		    < PATIENCE(atom).pos_in_other)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;
}

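/* This is a lower-bound search: e.g. if the stack tops map to right-side
 * positions 2, 5, 9 and the new atom maps to position 6, it returns index 2,
 * i.e. the leftmost stack whose top is not smaller; if all tops are smaller,
 * it returns patience_stacks_count, asking the caller to open a new stack. */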
/* Among the lines that appear exactly once in each side, find the longest
 * streak that appears in both files in the same order (with other stuff
 * allowed to interleave). Use patience sort for that, as in the Patience Diff
 * algorithm.
 * See https://bramcohen.livejournal.com/73318.html and, for a much more
 * detailed explanation,
 * https://blog.jcoglan.com/2017/09/19/the-patience-diff-algorithm/ */
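/* Worked example (illustrative): take the common-unique atoms in left-file
 * order and write down their right-file positions, say 9, 4, 6, 12. Dealing
 * them onto patience stacks gives tops [9], then [4], then [4 6], then
 * [4 6 12]: three stacks, so the longest common sequence has length 3, and
 * backtracking the prev_stack references from the last top yields 4, 6, 12. */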
int
diff_algo_patience(const struct diff_algo_config *algo_config,
		   struct diff_state *state)
{
	int rc;
	struct diff_data *left = &state->left;
	struct diff_data *right = &state->right;
	struct atom_patience *atom_patience_left =
		calloc(left->atoms.len, sizeof(struct atom_patience));
	struct atom_patience *atom_patience_right =
		calloc(right->atoms.len, sizeof(struct atom_patience));
	unsigned int unique_in_both_count;
	struct diff_atom **lcs = NULL;

	debug("\n** %s\n", __func__);

	left->root->current = left;
	right->root->current = right;
	left->algo_data = atom_patience_left;
	right->algo_data = atom_patience_right;

	/* Find those lines that appear exactly once in 'left' and exactly once
	 * in 'right'. */
	rc = diff_atoms_mark_unique_in_both(left, right, &unique_in_both_count);
	if (rc)
		goto free_and_exit;

	debug("unique_in_both_count %u\n", unique_in_both_count);
	debug("left:\n");
	debug_dump(left);
	debug("right:\n");
	debug_dump(right);

	if (!unique_in_both_count) {
		/* Cannot apply Patience, tell the caller to use fallback_algo
		 * instead. */
		rc = DIFF_RC_USE_DIFF_ALGO_FALLBACK;
		goto free_and_exit;
	}

	rc = diff_atoms_swallow_identical_neighbors(left, right,
						    &unique_in_both_count);
	if (rc)
		goto free_and_exit;
	debug("After swallowing identical neighbors: unique_in_both = %u\n",
	      unique_in_both_count);

	rc = ENOMEM;

	/* An array of the Longest Common Subsequence is the result of the
	 * below subscope: */
	unsigned int lcs_count = 0;
	struct diff_atom *lcs_tail = NULL;

	{
		/* This subscope marks the lifetime of the atom_pointers
		 * allocation */

		/* One chunk of storage for atom pointers */
		struct diff_atom **atom_pointers;
		atom_pointers = recallocarray(NULL, 0, unique_in_both_count * 2,
					      sizeof(struct diff_atom*));
		if (atom_pointers == NULL)
			goto free_and_exit;
		/* Half for the list of atoms that still need to be put on
		 * stacks */
		struct diff_atom **uniques = atom_pointers;

		/* Half for the patience sort state's "card stacks" -- we
		 * remember only each stack's topmost "card" */
		struct diff_atom **patience_stacks;
		patience_stacks = atom_pointers + unique_in_both_count;
		unsigned int patience_stacks_count = 0;

		/* Take all common, unique items from 'left' ... */

		struct diff_atom *atom;
		struct diff_atom **uniques_end = uniques;
		diff_data_foreach_atom(atom, left) {
			if (!PATIENCE(atom).unique_in_both)
				continue;
			*uniques_end = atom;
			uniques_end++;
		}

		/* ...and sort them to the order found in 'right'.
		 * The idea is to find the leftmost stack that has a higher line
		 * number and add it to the stack's top.
		 * If there is no such stack, open a new one on the right. The
		 * line number is derived from the atom*, which are array items
		 * and hence reflect the relative position in the source file.
		 * So we got the common-uniques from 'left' and sort them
		 * according to PATIENCE(atom).pos_in_other. */
		unsigned int i;
		for (i = 0; i < unique_in_both_count; i++) {
			atom = uniques[i];
			unsigned int target_stack;
			target_stack = find_target_stack(atom, patience_stacks,
							 patience_stacks_count);
			assert(target_stack <= patience_stacks_count);
			patience_stacks[target_stack] = atom;
			if (target_stack == patience_stacks_count)
				patience_stacks_count++;

			/* Record a back reference to the next stack on the
			 * left, which will form the final longest sequence
			 * later. */
			PATIENCE(atom).prev_stack = target_stack ?
				patience_stacks[target_stack - 1] : NULL;

			{
				int xx;
				for (xx = 0; xx < patience_stacks_count; xx++) {
					debug(" %s%d",
					      (xx == target_stack) ? ">" : "",
					      diff_atom_idx(right,
						PATIENCE(patience_stacks[xx]).pos_in_other));
				}
				debug("\n");
			}
		}

		/* Backtrace through prev_stack references to form the final
		 * longest common subsequence. */
		lcs_tail = patience_stacks[patience_stacks_count - 1];
		lcs_count = patience_stacks_count;

		/* uniques and patience_stacks are no longer needed.
		 * Backpointers are in PATIENCE(atom).prev_stack */
		free(atom_pointers);
	}

	lcs = recallocarray(NULL, 0, lcs_count, sizeof(struct diff_atom*));
	if (lcs == NULL)
		goto free_and_exit;
	struct diff_atom **lcs_backtrace_pos = &lcs[lcs_count - 1];
	struct diff_atom *atom;
	for (atom = lcs_tail; atom; atom = PATIENCE(atom).prev_stack, lcs_backtrace_pos--) {
		assert(lcs_backtrace_pos >= lcs);
		*lcs_backtrace_pos = atom;
	}

	unsigned int i;
	if (DEBUG) {
		debug("\npatience LCS:\n");
		for (i = 0; i < lcs_count; i++) {
			debug("\n L "); debug_dump_atom(left, right, lcs[i]);
			debug(" R "); debug_dump_atom(right, left,
						      PATIENCE(lcs[i]).pos_in_other);
		}
	}

	/* TODO: For each common-unique line found (now listed in lcs), swallow
	 * lines upwards and downwards that are identical on each side. Requires
	 * a way to represent atoms being glued to adjacent atoms. */

	debug("\ntraverse LCS, possibly recursing:\n");

	/* Now we have pinned positions in both files at which it makes sense to
	 * divide the diff problem into smaller chunks. Go into the next round:
	 * look at each section in turn, trying to again find common-unique
	 * lines in those smaller sections. As soon as no more are found, the
	 * remaining smaller sections are solved by Myers. */
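	/* For each LCS element the loop below emits up to two chunks: an
	 * unsolved or one-sided chunk for the not-yet-matched lines before it
	 * (left-only lines become a "minus" chunk, right-only lines a "plus"
	 * chunk, lines on both sides are left for the caller's inner
	 * algorithm), followed by a solved chunk covering the swallowed range
	 * of identical lines around the LCS atom itself. */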
	/* left_pos and right_pos are indexes in left/right->atoms.head until
	 * which the atoms are already handled (added to result chunks). */
	unsigned int left_pos = 0;
	unsigned int right_pos = 0;
	for (i = 0; i <= lcs_count; i++) {
		struct diff_atom *atom;
		struct diff_atom *atom_r;
		/* left_idx and right_idx are indexes of the start of this
		 * section of identical lines on both sides.
		 * left_pos marks the index of the first still unhandled line,
		 * left_idx is the start of an identical section some way
		 * further down, and this loop adds an unsolved chunk of
		 * [left_pos..left_idx[ and a solved chunk of
		 * [left_idx..identical_lines.end[. */
		unsigned int left_idx;
		unsigned int right_idx;

		debug("iteration %u of %u left_pos %u right_pos %u\n",
		      i, lcs_count, left_pos, right_pos);

		if (i < lcs_count) {
			atom = lcs[i];
			atom_r = PATIENCE(atom).pos_in_other;
			debug("lcs[%u] = left[%u] = right[%u]\n", i,
			      diff_atom_idx(left, atom), diff_atom_idx(right, atom_r));
			left_idx = PATIENCE(atom).identical_lines.start;
			right_idx = PATIENCE(atom_r).identical_lines.start;
			debug(" identical lines l %u-%u r %u-%u\n",
			      PATIENCE(atom).identical_lines.start, PATIENCE(atom).identical_lines.end,
			      PATIENCE(atom_r).identical_lines.start, PATIENCE(atom_r).identical_lines.end);
		} else {
			/* There are no more identical lines until the end of
			 * left and right. */
			atom = NULL;
			atom_r = NULL;
			left_idx = left->atoms.len;
			right_idx = right->atoms.len;
		}

		/* 'atom' (if not NULL) now marks an atom that matches on both
		 * sides according to patience-diff (a common-unique identical
		 * atom in both files).
		 * Handle the section before and the atom itself; the section
		 * after will be handled by the next loop iteration -- note that
		 * i loops to last element + 1 ("i <= lcs_count"), so that there
		 * will be another final iteration to pick up the last remaining
		 * items after the last LCS atom.
		 */

		debug("iteration %u left_pos %u left_idx %u"
		      " right_pos %u right_idx %u\n",
		      i, left_pos, left_idx, right_pos, right_idx);

		/* Section before the matching atom */
		struct diff_atom *left_atom = &left->atoms.head[left_pos];
		unsigned int left_section_len = left_idx - left_pos;

		struct diff_atom *right_atom = &(right->atoms.head[right_pos]);
		unsigned int right_section_len = right_idx - right_pos;

		if (left_section_len && right_section_len) {
			/* Record an unsolved chunk, the caller will apply
			 * inner_algo() on this chunk. */
			if (!diff_state_add_chunk(state, false,
						  left_atom, left_section_len,
						  right_atom,
						  right_section_len))
				goto free_and_exit;
		} else if (left_section_len && !right_section_len) {
			/* Only left atoms and none on the right, they form a
			 * "minus" chunk, then. */
			if (!diff_state_add_chunk(state, true,
						  left_atom, left_section_len,
						  right_atom, 0))
				goto free_and_exit;
		} else if (!left_section_len && right_section_len) {
			/* No left atoms, only atoms on the right, they form a
			 * "plus" chunk, then. */
			if (!diff_state_add_chunk(state, true,
						  left_atom, 0,
						  right_atom, right_section_len))
				goto free_and_exit;
		}
		/* else: left_section_len == 0 and right_section_len == 0, i.e.
		 * nothing here. */

		/* The atom found to match on both sides forms a chunk of equals
		 * on each side. In the very last iteration of this loop, there
		 * is no matching atom, we were just cleaning out the remaining
		 * lines. */
		if (atom) {
			void *ok;
			ok = diff_state_add_chunk(state, true,
				left->atoms.head
				+ PATIENCE(atom).identical_lines.start,
				diff_range_len(&PATIENCE(atom).identical_lines),
				right->atoms.head
				+ PATIENCE(atom_r).identical_lines.start,
				diff_range_len(&PATIENCE(atom_r).identical_lines));
			if (!ok)
				goto free_and_exit;
			left_pos = PATIENCE(atom).identical_lines.end;
			right_pos = PATIENCE(atom_r).identical_lines.end;
		} else {
			left_pos = left_idx + 1;
			right_pos = right_idx + 1;
		}
		debug("end of iteration %u left_pos %u left_idx %u"
		      " right_pos %u right_idx %u\n",
		      i, left_pos, left_idx, right_pos, right_idx);
	}
	debug("** END %s\n", __func__);

	rc = DIFF_RC_OK;

free_and_exit:
	left->root->current = NULL;
	right->root->current = NULL;
	free(atom_patience_left);
	free(atom_patience_right);
	if (lcs)
		free(lcs);
	return rc;
}