2013-08-31 04:13:05 +05:30
|
|
|
#!/usr/bin/python
|
|
|
|
|
2013-09-12 14:42:15 +05:30
|
|
|
import argparse
|
|
|
|
import sys
|
|
|
|
|
|
|
|
import apparmor.aa as apparmor
|
2013-09-17 22:30:48 +05:30
|
|
|
import apparmor.cleanprofile as cleanprofile
|
2013-09-19 10:32:19 +05:30
|
|
|
|
2013-09-22 15:25:20 +05:30
|
|
|
# Command-line interface.  NOTE: arguments are parsed at import time, so this
# module is a script, not an importable library.
parser = argparse.ArgumentParser(description=_('Perform a 3way merge on the given profiles'))

parser.add_argument('mine', type=str, help=_('your profile'))
parser.add_argument('base', type=str, help=_('base profile'))
parser.add_argument('other', type=str, help=_('other profile'))
parser.add_argument('-d', '--dir', type=str, help=_('path to profiles'))
# Fixed user-facing typo: "incase" -> "in case"
parser.add_argument('-a', '--auto', action='store_true', help=_('Automatically merge profiles, exits in case of *x conflicts'))

args = parser.parse_args()

# Order matters: Merge() unpacks this as (user, base, other).
profiles = [args.mine, args.base, args.other]

# TODO(review): leftover debug output — consider removing or routing
# through a verbose flag before release.
print(profiles)
|
|
|
|
|
|
|
|
def main():
    """Drive the 3-way merge: load all three profiles and strip the
    rules they already agree on."""
    merger = Merge(profiles)

    # Discard common/superfluous rules before any interactive resolution.
    merger.clear_common()
|
2013-09-22 22:51:30 +05:30
|
|
|
|
2013-09-12 14:42:15 +05:30
|
|
|
class Merge(object):
    """Container for the three profiles taking part in a 3-way merge.

    Reads and parses the base, other and user profiles (in that order),
    snapshotting each one into a ``cleanprofile.Prof`` and resetting
    apparmor's module-level parser state between reads so the profiles
    do not bleed into one another.
    """

    def __init__(self, profiles):
        # Expected ordering: [user, base, other] — see the module-level
        # ``profiles`` list built from the CLI arguments.
        user, base, other = profiles

        # Read and parse base profile, snapshot its data, then reset the
        # shared parser state.
        apparmor.read_profile(base, True)
        self.base = cleanprofile.Prof(base)
        self.reset()

        # Read and parse other profile, snapshot its data, then reset.
        apparmor.read_profile(other, True)
        self.other = cleanprofile.Prof(other)
        self.reset()

        # Read and parse user profile.  Consistency fix: use the unpacked
        # ``user`` name instead of re-indexing ``profiles[0]``, matching
        # how ``base`` and ``other`` are handled above.
        apparmor.read_profile(user, True)
        self.user = cleanprofile.Prof(user)

    def reset(self):
        """Clear apparmor's module-level parser state so the next
        ``read_profile()`` call starts from a clean slate."""
        apparmor.aa = apparmor.hasher()
        apparmor.filelist = apparmor.hasher()
        apparmor.include = dict()
        apparmor.existing_profiles = apparmor.hasher()
        apparmor.original_aa = apparmor.hasher()

    def clear_common(self):
        """Remove rules that are common/superfluous between the profiles.

        Mutates the stored profiles in place; ``deleted`` accumulates the
        number of removed entries reported by each pairwise comparison.
        """
        deleted = 0

        # Remove the parts of the other profile which are common/superfluous
        # with the user profile.
        user_other = cleanprofile.CleanProf(False, self.user, self.other)
        deleted += user_other.compare_profiles()

        # Remove the parts of the base profile which are common/superfluous
        # with the user profile.
        user_base = cleanprofile.CleanProf(False, self.user, self.base)
        deleted += user_base.compare_profiles()

        # Remove the parts of the other profile which are common/superfluous
        # with the base profile.
        # Bug fix: this previously re-ran ``user_base.compare_profiles()``,
        # leaving ``base_other`` unused and base/other duplicates in place.
        base_other = cleanprofile.CleanProf(False, self.base, self.other)
        deleted += base_other.compare_profiles()

    def ask_the_questions(self):
        # Not implemented yet: interactive resolution of the remaining
        # conflicts is intended to go here.
        pass
|
2013-09-22 22:51:30 +05:30
|
|
|
|
2013-09-12 14:42:15 +05:30
|
|
|
|
2013-09-23 02:14:11 +05:30
|
|
|
# Entry point: only run the merge when executed as a script.
if __name__ == '__main__':
    main()
|
2013-09-12 14:42:15 +05:30
|
|
|
|
|
|
|
# def intersect(ra, rb):
|
|
|
|
# """Given two ranges return the range where they intersect or None.
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# >>> intersect((0, 10), (0, 6))
|
|
|
|
# (0, 6)
|
|
|
|
# >>> intersect((0, 10), (5, 15))
|
|
|
|
# (5, 10)
|
|
|
|
# >>> intersect((0, 10), (10, 15))
|
|
|
|
# >>> intersect((0, 9), (10, 15))
|
|
|
|
# >>> intersect((0, 9), (7, 15))
|
|
|
|
# (7, 9)
|
|
|
|
# """
|
|
|
|
# # preconditions: (ra[0] <= ra[1]) and (rb[0] <= rb[1])
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# sa = max(ra[0], rb[0])
|
|
|
|
# sb = min(ra[1], rb[1])
|
|
|
|
# if sa < sb:
|
|
|
|
# return sa, sb
|
|
|
|
# else:
|
|
|
|
# return None
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# def compare_range(a, astart, aend, b, bstart, bend):
|
|
|
|
# """Compare a[astart:aend] == b[bstart:bend], without slicing.
|
|
|
|
# """
|
|
|
|
# if (aend-astart) != (bend-bstart):
|
|
|
|
# return False
|
|
|
|
# for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
|
|
|
|
# if a[ia] != b[ib]:
|
|
|
|
# return False
|
|
|
|
# else:
|
|
|
|
# return True
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# class Merge3(object):
|
|
|
|
# """3-way merge of texts.
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# Given BASE, OTHER, THIS, tries to produce a combined text
|
|
|
|
# incorporating the changes from both BASE->OTHER and BASE->THIS.
|
|
|
|
# All three will typically be sequences of lines."""
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# def __init__(self, base, a, b, is_cherrypick=False, allow_objects=False):
|
|
|
|
# """Constructor.
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# :param base: lines in BASE
|
|
|
|
# :param a: lines in A
|
|
|
|
# :param b: lines in B
|
|
|
|
# :param is_cherrypick: flag indicating if this merge is a cherrypick.
|
|
|
|
# When cherrypicking b => a, matches with b and base do not conflict.
|
|
|
|
# :param allow_objects: if True, do not require that base, a and b are
|
|
|
|
# plain Python strs. Also prevents BinaryFile from being raised.
|
|
|
|
# Lines can be any sequence of comparable and hashable Python
|
|
|
|
# objects.
|
|
|
|
# """
|
|
|
|
# if not allow_objects:
|
|
|
|
# #textfile.check_text_lines(base)
|
|
|
|
# #textfile.check_text_lines(a)
|
|
|
|
# #textfile.check_text_lines(b)
|
|
|
|
# pass
|
|
|
|
# self.base = base
|
|
|
|
# self.a = a
|
|
|
|
# self.b = b
|
|
|
|
# self.is_cherrypick = is_cherrypick
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# def merge_lines(self,
|
|
|
|
# name_a=None,
|
|
|
|
# name_b=None,
|
|
|
|
# name_base=None,
|
|
|
|
# start_marker='<<<<<<<',
|
|
|
|
# mid_marker='=======',
|
|
|
|
# end_marker='>>>>>>>',
|
|
|
|
# base_marker=None,
|
|
|
|
# reprocess=False):
|
|
|
|
# """Return merge in cvs-like form.
|
|
|
|
# """
|
|
|
|
# newline = '\n'
|
|
|
|
# if len(self.a) > 0:
|
|
|
|
# if self.a[0].endswith('\r\n'):
|
|
|
|
# newline = '\r\n'
|
|
|
|
# elif self.a[0].endswith('\r'):
|
|
|
|
# newline = '\r'
|
|
|
|
# if base_marker and reprocess:
|
|
|
|
# raise errors.CantReprocessAndShowBase()
|
|
|
|
# if name_a:
|
|
|
|
# start_marker = start_marker + ' ' + name_a
|
|
|
|
# if name_b:
|
|
|
|
# end_marker = end_marker + ' ' + name_b
|
|
|
|
# if name_base and base_marker:
|
|
|
|
# base_marker = base_marker + ' ' + name_base
|
|
|
|
# merge_regions = self.merge_regions()
|
|
|
|
# if reprocess is True:
|
|
|
|
# merge_regions = self.reprocess_merge_regions(merge_regions)
|
|
|
|
# for t in merge_regions:
|
|
|
|
# what = t[0]
|
|
|
|
# if what == 'unchanged':
|
|
|
|
# for i in range(t[1], t[2]):
|
|
|
|
# yield self.base[i]
|
|
|
|
# elif what == 'a' or what == 'same':
|
|
|
|
# for i in range(t[1], t[2]):
|
|
|
|
# yield self.a[i]
|
|
|
|
# elif what == 'b':
|
|
|
|
# for i in range(t[1], t[2]):
|
|
|
|
# yield self.b[i]
|
|
|
|
# elif what == 'conflict':
|
|
|
|
# yield start_marker + newline
|
|
|
|
# for i in range(t[3], t[4]):
|
|
|
|
# yield self.a[i]
|
|
|
|
# if base_marker is not None:
|
|
|
|
# yield base_marker + newline
|
|
|
|
# for i in range(t[1], t[2]):
|
|
|
|
# yield self.base[i]
|
|
|
|
# yield mid_marker + newline
|
|
|
|
# for i in range(t[5], t[6]):
|
|
|
|
# yield self.b[i]
|
|
|
|
# yield end_marker + newline
|
|
|
|
# else:
|
|
|
|
# raise ValueError(what)
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# def merge_annotated(self):
|
|
|
|
# """Return merge with conflicts, showing origin of lines.
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# Most useful for debugging merge.
|
|
|
|
# """
|
|
|
|
# for t in self.merge_regions():
|
|
|
|
# what = t[0]
|
|
|
|
# if what == 'unchanged':
|
|
|
|
# for i in range(t[1], t[2]):
|
|
|
|
# yield 'u | ' + self.base[i]
|
|
|
|
# elif what == 'a' or what == 'same':
|
|
|
|
# for i in range(t[1], t[2]):
|
|
|
|
# yield what[0] + ' | ' + self.a[i]
|
|
|
|
# elif what == 'b':
|
|
|
|
# for i in range(t[1], t[2]):
|
|
|
|
# yield 'b | ' + self.b[i]
|
|
|
|
# elif what == 'conflict':
|
|
|
|
# yield '<<<<\n'
|
|
|
|
# for i in range(t[3], t[4]):
|
|
|
|
# yield 'A | ' + self.a[i]
|
|
|
|
# yield '----\n'
|
|
|
|
# for i in range(t[5], t[6]):
|
|
|
|
# yield 'B | ' + self.b[i]
|
|
|
|
# yield '>>>>\n'
|
|
|
|
# else:
|
|
|
|
# raise ValueError(what)
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# def merge_groups(self):
|
|
|
|
# """Yield sequence of line groups. Each one is a tuple:
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# 'unchanged', lines
|
|
|
|
# Lines unchanged from base
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# 'a', lines
|
|
|
|
# Lines taken from a
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# 'same', lines
|
|
|
|
# Lines taken from a (and equal to b)
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# 'b', lines
|
|
|
|
# Lines taken from b
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# 'conflict', base_lines, a_lines, b_lines
|
|
|
|
# Lines from base were changed to either a or b and conflict.
|
|
|
|
# """
|
|
|
|
# for t in self.merge_regions():
|
|
|
|
# what = t[0]
|
|
|
|
# if what == 'unchanged':
|
|
|
|
# yield what, self.base[t[1]:t[2]]
|
|
|
|
# elif what == 'a' or what == 'same':
|
|
|
|
# yield what, self.a[t[1]:t[2]]
|
|
|
|
# elif what == 'b':
|
|
|
|
# yield what, self.b[t[1]:t[2]]
|
|
|
|
# elif what == 'conflict':
|
|
|
|
# yield (what,
|
|
|
|
# self.base[t[1]:t[2]],
|
|
|
|
# self.a[t[3]:t[4]],
|
|
|
|
# self.b[t[5]:t[6]])
|
|
|
|
# else:
|
|
|
|
# raise ValueError(what)
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# def merge_regions(self):
|
|
|
|
# """Return sequences of matching and conflicting regions.
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# This returns tuples, where the first value says what kind we
|
|
|
|
# have:
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# 'unchanged', start, end
|
|
|
|
# Take a region of base[start:end]
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# 'same', astart, aend
|
|
|
|
# b and a are different from base but give the same result
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# 'a', start, end
|
|
|
|
# Non-clashing insertion from a[start:end]
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# Method is as follows:
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# The two sequences align only on regions which match the base
|
|
|
|
# and both descendents. These are found by doing a two-way diff
|
|
|
|
# of each one against the base, and then finding the
|
|
|
|
# intersections between those regions. These "sync regions"
|
|
|
|
# are by definition unchanged in both and easily dealt with.
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# The regions in between can be in any of three cases:
|
|
|
|
# conflicted, or changed on only one side.
|
|
|
|
# """
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# # section a[0:ia] has been disposed of, etc
|
|
|
|
# iz = ia = ib = 0
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# for zmatch, zend, amatch, aend, bmatch, bend in self.find_sync_regions():
|
|
|
|
# matchlen = zend - zmatch
|
|
|
|
# # invariants:
|
|
|
|
# # matchlen >= 0
|
|
|
|
# # matchlen == (aend - amatch)
|
|
|
|
# # matchlen == (bend - bmatch)
|
|
|
|
# len_a = amatch - ia
|
|
|
|
# len_b = bmatch - ib
|
|
|
|
# len_base = zmatch - iz
|
|
|
|
# # invariants:
|
|
|
|
# # assert len_a >= 0
|
|
|
|
# # assert len_b >= 0
|
|
|
|
# # assert len_base >= 0
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# #print 'unmatched a=%d, b=%d' % (len_a, len_b)
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# if len_a or len_b:
|
|
|
|
# # try to avoid actually slicing the lists
|
|
|
|
# same = compare_range(self.a, ia, amatch,
|
|
|
|
# self.b, ib, bmatch)
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# if same:
|
|
|
|
# yield 'same', ia, amatch
|
|
|
|
# else:
|
|
|
|
# equal_a = compare_range(self.a, ia, amatch,
|
|
|
|
# self.base, iz, zmatch)
|
|
|
|
# equal_b = compare_range(self.b, ib, bmatch,
|
|
|
|
# self.base, iz, zmatch)
|
|
|
|
# if equal_a and not equal_b:
|
|
|
|
# yield 'b', ib, bmatch
|
|
|
|
# elif equal_b and not equal_a:
|
|
|
|
# yield 'a', ia, amatch
|
|
|
|
# elif not equal_a and not equal_b:
|
|
|
|
# if self.is_cherrypick:
|
|
|
|
# for node in self._refine_cherrypick_conflict(
|
|
|
|
# iz, zmatch, ia, amatch,
|
|
|
|
# ib, bmatch):
|
|
|
|
# yield node
|
|
|
|
# else:
|
|
|
|
# yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch
|
|
|
|
# else:
|
|
|
|
# raise AssertionError("can't handle a=b=base but unmatched")
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# ia = amatch
|
|
|
|
# ib = bmatch
|
|
|
|
# iz = zmatch
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# # if the same part of the base was deleted on both sides
|
|
|
|
# # that's OK, we can just skip it.
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# if matchlen > 0:
|
|
|
|
# # invariants:
|
|
|
|
# # assert ia == amatch
|
|
|
|
# # assert ib == bmatch
|
|
|
|
# # assert iz == zmatch
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# yield 'unchanged', zmatch, zend
|
|
|
|
# iz = zend
|
|
|
|
# ia = aend
|
|
|
|
# ib = bend
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# def _refine_cherrypick_conflict(self, zstart, zend, astart, aend, bstart, bend):
|
|
|
|
# """When cherrypicking b => a, ignore matches with b and base."""
|
|
|
|
# # Do not emit regions which match, only regions which do not match
|
|
|
|
# matches = patiencediff.PatienceSequenceMatcher(None,
|
|
|
|
# self.base[zstart:zend], self.b[bstart:bend]).get_matching_blocks()
|
|
|
|
# last_base_idx = 0
|
|
|
|
# last_b_idx = 0
|
|
|
|
# last_b_idx = 0
|
|
|
|
# yielded_a = False
|
|
|
|
# for base_idx, b_idx, match_len in matches:
|
|
|
|
# conflict_z_len = base_idx - last_base_idx
|
|
|
|
# conflict_b_len = b_idx - last_b_idx
|
|
|
|
# if conflict_b_len == 0: # There are no lines in b which conflict,
|
|
|
|
# # so skip it
|
|
|
|
# pass
|
|
|
|
# else:
|
|
|
|
# if yielded_a:
|
|
|
|
# yield ('conflict',
|
|
|
|
# zstart + last_base_idx, zstart + base_idx,
|
|
|
|
# aend, aend, bstart + last_b_idx, bstart + b_idx)
|
|
|
|
# else:
|
|
|
|
# # The first conflict gets the a-range
|
|
|
|
# yielded_a = True
|
|
|
|
# yield ('conflict', zstart + last_base_idx, zstart +
|
|
|
|
# base_idx,
|
|
|
|
# astart, aend, bstart + last_b_idx, bstart + b_idx)
|
|
|
|
# last_base_idx = base_idx + match_len
|
|
|
|
# last_b_idx = b_idx + match_len
|
|
|
|
# if last_base_idx != zend - zstart or last_b_idx != bend - bstart:
|
|
|
|
# if yielded_a:
|
|
|
|
# yield ('conflict', zstart + last_base_idx, zstart + base_idx,
|
|
|
|
# aend, aend, bstart + last_b_idx, bstart + b_idx)
|
|
|
|
# else:
|
|
|
|
# # The first conflict gets the a-range
|
|
|
|
# yielded_a = True
|
|
|
|
# yield ('conflict', zstart + last_base_idx, zstart + base_idx,
|
|
|
|
# astart, aend, bstart + last_b_idx, bstart + b_idx)
|
|
|
|
# if not yielded_a:
|
|
|
|
# yield ('conflict', zstart, zend, astart, aend, bstart, bend)
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# def reprocess_merge_regions(self, merge_regions):
|
|
|
|
# """Where there are conflict regions, remove the agreed lines.
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# Lines where both A and B have made the same changes are
|
|
|
|
# eliminated.
|
|
|
|
# """
|
|
|
|
# for region in merge_regions:
|
|
|
|
# if region[0] != "conflict":
|
|
|
|
# yield region
|
|
|
|
# continue
|
|
|
|
# type, iz, zmatch, ia, amatch, ib, bmatch = region
|
|
|
|
# a_region = self.a[ia:amatch]
|
|
|
|
# b_region = self.b[ib:bmatch]
|
|
|
|
# matches = patiencediff.PatienceSequenceMatcher(
|
|
|
|
# None, a_region, b_region).get_matching_blocks()
|
|
|
|
# next_a = ia
|
|
|
|
# next_b = ib
|
|
|
|
# for region_ia, region_ib, region_len in matches[:-1]:
|
|
|
|
# region_ia += ia
|
|
|
|
# region_ib += ib
|
|
|
|
# reg = self.mismatch_region(next_a, region_ia, next_b,
|
|
|
|
# region_ib)
|
|
|
|
# if reg is not None:
|
|
|
|
# yield reg
|
|
|
|
# yield 'same', region_ia, region_len+region_ia
|
|
|
|
# next_a = region_ia + region_len
|
|
|
|
# next_b = region_ib + region_len
|
|
|
|
# reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
|
|
|
|
# if reg is not None:
|
|
|
|
# yield reg
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# @staticmethod
|
|
|
|
# def mismatch_region(next_a, region_ia, next_b, region_ib):
|
|
|
|
# if next_a < region_ia or next_b < region_ib:
|
|
|
|
# return 'conflict', None, None, next_a, region_ia, next_b, region_ib
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# def find_sync_regions(self):
|
|
|
|
# """Return a list of sync regions, where both descendents match the base.
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# Generates a list of (base1, base2, a1, a2, b1, b2). There is
|
|
|
|
# always a zero-length sync region at the end of all the files.
|
|
|
|
# """
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# ia = ib = 0
|
|
|
|
# amatches = patiencediff.PatienceSequenceMatcher(
|
|
|
|
# None, self.base, self.a).get_matching_blocks()
|
|
|
|
# bmatches = patiencediff.PatienceSequenceMatcher(
|
|
|
|
# None, self.base, self.b).get_matching_blocks()
|
|
|
|
# len_a = len(amatches)
|
|
|
|
# len_b = len(bmatches)
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# sl = []
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# while ia < len_a and ib < len_b:
|
|
|
|
# abase, amatch, alen = amatches[ia]
|
|
|
|
# bbase, bmatch, blen = bmatches[ib]
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# # there is an unconflicted block at i; how long does it
|
|
|
|
# # extend? until whichever one ends earlier.
|
|
|
|
# i = intersect((abase, abase+alen), (bbase, bbase+blen))
|
|
|
|
# if i:
|
|
|
|
# intbase = i[0]
|
|
|
|
# intend = i[1]
|
|
|
|
# intlen = intend - intbase
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# # found a match of base[i[0], i[1]]; this may be less than
|
|
|
|
# # the region that matches in either one
|
|
|
|
# # assert intlen <= alen
|
|
|
|
# # assert intlen <= blen
|
|
|
|
# # assert abase <= intbase
|
|
|
|
# # assert bbase <= intbase
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# asub = amatch + (intbase - abase)
|
|
|
|
# bsub = bmatch + (intbase - bbase)
|
|
|
|
# aend = asub + intlen
|
|
|
|
# bend = bsub + intlen
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# # assert self.base[intbase:intend] == self.a[asub:aend], \
|
|
|
|
# # (self.base[intbase:intend], self.a[asub:aend])
|
|
|
|
# # assert self.base[intbase:intend] == self.b[bsub:bend]
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# sl.append((intbase, intend,
|
|
|
|
# asub, aend,
|
|
|
|
# bsub, bend))
|
|
|
|
# # advance whichever one ends first in the base text
|
|
|
|
# if (abase + alen) < (bbase + blen):
|
|
|
|
# ia += 1
|
|
|
|
# else:
|
|
|
|
# ib += 1
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# intbase = len(self.base)
|
|
|
|
# abase = len(self.a)
|
|
|
|
# bbase = len(self.b)
|
|
|
|
# sl.append((intbase, intbase, abase, abase, bbase, bbase))
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# return sl
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# def find_unconflicted(self):
|
|
|
|
# """Return a list of ranges in base that are not conflicted."""
|
|
|
|
# am = patiencediff.PatienceSequenceMatcher(
|
|
|
|
# None, self.base, self.a).get_matching_blocks()
|
|
|
|
# bm = patiencediff.PatienceSequenceMatcher(
|
|
|
|
# None, self.base, self.b).get_matching_blocks()
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# unc = []
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# while am and bm:
|
|
|
|
# # there is an unconflicted block at i; how long does it
|
|
|
|
# # extend? until whichever one ends earlier.
|
|
|
|
# a1 = am[0][0]
|
|
|
|
# a2 = a1 + am[0][2]
|
|
|
|
# b1 = bm[0][0]
|
|
|
|
# b2 = b1 + bm[0][2]
|
|
|
|
# i = intersect((a1, a2), (b1, b2))
|
|
|
|
# if i:
|
|
|
|
# unc.append(i)
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# if a2 < b2:
|
|
|
|
# del am[0]
|
|
|
|
# else:
|
|
|
|
# del bm[0]
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# return unc
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# a = file(profiles[0], 'rt').readlines()
|
|
|
|
# base = file(profiles[1], 'rt').readlines()
|
|
|
|
# b = file(profiles[2], 'rt').readlines()
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-12 14:42:15 +05:30
|
|
|
# m3 = Merge3(base, a, b)
|
2013-09-22 22:51:30 +05:30
|
|
|
#
|
2013-09-19 10:32:19 +05:30
|
|
|
# sys.stdout.write(m3.merge_annotated())
|