[Debian-islamic-commits] [SCM] Packaging for Thawab branch, master, updated. upstream/3.0.10-58-g9dca20d
أحمد المحمودي (Ahmed El-Mahmoudy)
aelmahmoudy at sabily.org
Thu Jul 28 12:56:47 UTC 2011
The following commit has been merged into the master branch:
commit ac990bd6f9fde0140d603135b3b857aeeb6e7681
Author: أحمد المحمودي (Ahmed El-Mahmoudy) <aelmahmoudy at sabily.org>
Date: Mon Feb 21 23:07:41 2011 +0200
Removed import-regexp-fix.diff and whoosh1.3.3.diff patches, as they are
included in the new upstream release.
diff --git a/debian/patches/import-regexp-fix.diff b/debian/patches/import-regexp-fix.diff
deleted file mode 100644
index 68d79d7..0000000
--- a/debian/patches/import-regexp-fix.diff
+++ /dev/null
@@ -1,64 +0,0 @@
-Description: fix import bug in الشرح الممتع
-Origin: http://git.ojuba.org/cgit/thawab/commit/?id=e3db0403acc1ae6743c9cda757ccbdcc780b07d4
-Author: Muayyad Alsadi مؤيد السعدي <alsadi at ojuba.org>
-
-
-diff --git a/Thawab/shamelaUtils.py b/Thawab/shamelaUtils.py
-index b721828..71f83d1 100644
---- a/Thawab/shamelaUtils.py
-+++ b/Thawab/shamelaUtils.py
-@@ -398,6 +398,17 @@ def set_get_xref(xref, h_tags, sh, bkid, pg_id, matn, matnid):
- xref=sh.get_xref(matn, matnid)
- if xref: h_tags['embed.original.section']=xref
-
-+ss_re=re.compile(" +")
-+re_ss_re=re.compile("( \*){2,}")
-+
-+def ss(txt):
-+ """squeeze spaces"""
-+ return ss_re.sub(" ", txt)
-+
-+def re_ss(txt):
-+ """squeeze spaces in re"""
-+ return re_ss_re.sub(" *", ss(txt))
-+
-
- def shamelaImport(cursor, sh, bkid, footnote_re=ur'\((\d+)\)', body_footnote_re=ur'\((\d+)\)', ft_prefix_len=1, ft_suffix_len=1):
- """
-@@ -493,30 +504,30 @@ def shamelaImport(cursor, sh, bkid, footnote_re=ur'\((\d+)\)', body_footnote_re=
- h_p=no_w_re.sub(' ', h.translate(sh_normalize_tb)).strip()
- if h_p: # if normalized h_p is not empty
- # NOTE: no need for map h_p on re.escape() because it does not contain special chars
-- h_re_entire_line=re.compile(ur"^\s*%s\s*$" % ur" *".join(list(h_p)), re.M)
-+ h_re_entire_line=re.compile(re_ss(ur"^\s*%s\s*$" % ur" *".join(list(h_p))), re.M)
- if _shamelaFindHeadings(txt, page_id, d, h, h_re_entire_line, ix, j, 2): continue
-
- if not txt_no_d: txt_no_d=txt.translate(sh_digits_to_spaces_tb)
- h_p_no_d=h_p.translate(sh_digits_to_spaces_tb).strip()
- if h_p_no_d:
-- h_re_entire_line_no_d=re.compile(ur"^\s*%s\s*$" % ur" *".join(list(h_p_no_d)), re.M)
-+ h_re_entire_line_no_d=re.compile(re_ss(ur"^\s*%s\s*$" % ur" *".join(list(h_p_no_d))), re.M)
- if _shamelaFindHeadings(txt_no_d, page_id, d, h, h_re_entire_line_no_d, ix, j, 3): continue
-
- # at the beginning of the line
- if _shamelaFindExactHeadings(page_txt, page_id, "\n%s", d, h, ix,j, 4): continue
- if h_p:
-- h_re_line_start=re.compile(ur"^\s*%s\s*" % ur" *".join(list(h_p)), re.M)
-+ h_re_line_start=re.compile(re_ss(ur"^\s*%s\s*" % ur" *".join(list(h_p))), re.M)
- if _shamelaFindHeadings(txt, page_id, d, h, h_re_line_start, ix, j, 5): continue
- if h_p_no_d:
-- h_re_line_start_no_d=re.compile(ur"^\s*%s\s*" % ur" *".join(list(h_p_no_d)), re.M)
-+ h_re_line_start_no_d=re.compile(re_ss(ur"^\s*%s\s*" % ur" *".join(list(h_p_no_d))), re.M)
- if _shamelaFindHeadings(txt_no_d, page_id, d, h, h_re_line_start_no_d, ix, j, 6): continue
- # any where in the line
- if _shamelaFindExactHeadings(page_txt, page_id, "%s", d, h, ix,j, 7): continue
- if h_p:
-- h_re_any_ware=re.compile(ur"\s*%s\s*" % ur" *".join(list(h_p)), re.M)
-+ h_re_any_ware=re.compile(re_ss(ur"\s*%s\s*" % ur" *".join(list(h_p))), re.M)
- if _shamelaFindHeadings(txt, page_id, d, h, h_re_any_ware, ix, j, 8): continue
- if h_p_no_d:
-- h_re_any_ware_no_d=re.compile(ur"\s*%s\s*" % ur" *".join(list(h_p_no_d)), re.M)
-+ h_re_any_ware_no_d=re.compile(re_ss(ur"\s*%s\s*" % ur" *".join(list(h_p_no_d))), re.M)
- if _shamelaFindHeadings(txt_no_d, page_id, d, h, h_re_any_ware, ix, j, 9): continue
- # if we reached here then head is not found
- # place it just after last one
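For reference, the dropped import-regexp-fix.diff backported upstream's space-squeezing helpers shown above: shamelaUtils.py builds heading regexes by joining each character of a normalized heading with " *", so a heading that itself contains spaces produces runs of " *" that must be collapsed before compiling. A minimal standalone sketch of the same idea (Python 3 syntax here; the original targets Python 2, hence its ur'' literals — the sample heading below is made-up):

    import re

    ss_re = re.compile(" +")              # runs of literal spaces
    re_ss_re = re.compile(r"( \*){2,}")   # runs of " *" inside a built pattern

    def ss(txt):
        """Squeeze runs of spaces down to a single space."""
        return ss_re.sub(" ", txt)

    def re_ss(txt):
        """Squeeze repeated ' *' fragments in a dynamically built regex."""
        return re_ss_re.sub(" *", ss(txt))

    # Build an "entire line" pattern from a heading, as the patched hunks do.
    h_p = "باب العلم"   # a normalized heading; hypothetical sample text
    pattern = re_ss(r"^\s*%s\s*$" % " *".join(list(h_p)))
    h_re_entire_line = re.compile(pattern, re.M)
    print(bool(h_re_entire_line.search("  باب العلم  ")))   # True

Without re_ss(), the space inside the heading would yield " *  *" in the joined pattern; the two substitutions collapse that back to a single " *", matching the heading regardless of how its spacing was flattened on the page.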
diff --git a/debian/patches/series b/debian/patches/series
index c25160c..99c63bd 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,6 +1,4 @@
private-pkg.diff
separate_setup.py.diff
desktop-categories.diff
-whoosh1.3.3.diff
po_copyright.diff
-import-regexp-fix.diff
diff --git a/debian/patches/whoosh1.3.3.diff b/debian/patches/whoosh1.3.3.diff
deleted file mode 100644
index df30fe0..0000000
--- a/debian/patches/whoosh1.3.3.diff
+++ /dev/null
@@ -1,458 +0,0 @@
-Description: separate our custom parser and update for whoosh 1.3.3
-Origin: http://git.ojuba.org/cgit/thawab/commit/?id=cc5f60064e626c649a882de8363e4b08a53957ac
-Author: Muayyad Alsadi مؤيد السعدي <alsadi at ojuba.org>
-
-
-diff --git a/Thawab/whooshSearchEngine.py b/Thawab/whooshSearchEngine.py
-index 1c37d9e..76bbd10 100644
---- a/Thawab/whooshSearchEngine.py
-+++ b/Thawab/whooshSearchEngine.py
-@@ -36,227 +36,8 @@ def stemfn(word): return stemArabic(stem(word))
- # word_re=ur"[\w\u064e\u064b\u064f\u064c\u0650\u064d\u0652\u0651\u0640]"
- analyzer=StandardAnalyzer(expression=ur"[\w\u064e\u064b\u064f\u064c\u0650\u064d\u0652\u0651\u0640]+(?:\.?[\w\u064e\u064b\u064f\u064c\u0650\u064d\u0652\u0651\u0640]+)*") | StemFilter(stemfn)
-
--#from whoosh.fields import FieldType, KeywordAnalyzer
--#try: from whoosh.fields import Existence
--#except ImportError: from whoosh.fields import Existance as Existence
--
--#class TAGSLIST(FieldType):
--# """
--# Configured field type for fields containing space-separated or comma-separated
--# keyword-like data (such as tags). The default is to not store positional information
--# (so phrase searching is not allowed in this field) and to not make the field scorable.
--#
--# unlike KEYWORD field type, TAGS list does not count frequency just existence.
--# """
--#
--# def __init__(self, stored = False, lowercase = False, commas = False,
--# scorable = False, unique = False, field_boost = 1.0):
--# """
--# :stored: Whether to store the value of the field with the document.
--# :comma: Whether this is a comma-separated field. If this is False
--# (the default), it is treated as a space-separated field.
--# :scorable: Whether this field is scorable.
--# """
--#
--# ana = KeywordAnalyzer(lowercase = lowercase, commas = commas)
--# self.format = Existence(analyzer = ana, field_boost = field_boost)
--# self.scorable = scorable
--# self.stored = stored
--# self.unique = unique
--
--from whoosh.qparser import MultifieldParser, FieldAliasPlugin, QueryParserError, BoostPlugin, GroupPlugin, PhrasePlugin, RangePlugin, SingleQuotesPlugin, Group, AndGroup, OrGroup, AndNotGroup, AndMaybeGroup, Singleton, BasicSyntax, Plugin, White, Token
--
--from whoosh.qparser import CompoundsPlugin, NotPlugin, WildcardPlugin
--
--class ThCompoundsPlugin(Plugin):
-- """Adds the ability to use &, |, &~, and &! to specify
-- query constraints.
--
-- This plugin is included in the default parser configuration.
-- """
--
-- def tokens(self):
-- return ((ThCompoundsPlugin.AndNot, -10), (ThCompoundsPlugin.AndMaybe, -5), (ThCompoundsPlugin.And, 0),
-- (ThCompoundsPlugin.Or, 0))
--
-- def filters(self):
-- return ((ThCompoundsPlugin.do_compounds, 600), )
--
-- @staticmethod
-- def do_compounds(parser, stream):
-- newstream = stream.empty()
-- i = 0
-- while i < len(stream):
-- t = stream[i]
-- ismiddle = newstream and i < len(stream) - 1
-- if isinstance(t, Group):
-- newstream.append(ThCompoundsPlugin.do_compounds(parser, t))
-- elif isinstance(t, (ThCompoundsPlugin.And, ThCompoundsPlugin.Or)):
-- if isinstance(t, ThCompoundsPlugin.And):
-- cls = AndGroup
-- else:
-- cls = OrGroup
--
-- if cls != type(newstream) and ismiddle:
-- last = newstream.pop()
-- rest = ThCompoundsPlugin.do_compounds(parser, cls(stream[i+1:]))
-- newstream.append(cls([last, rest]))
-- break
--
-- elif isinstance(t, ThCompoundsPlugin.AndNot):
-- if ismiddle:
-- last = newstream.pop()
-- i += 1
-- next = stream[i]
-- if isinstance(next, Group):
-- next = ThCompoundsPlugin.do_compounds(parser, next)
-- newstream.append(AndNotGroup([last, next]))
--
-- elif isinstance(t, ThCompoundsPlugin.AndMaybe):
-- if ismiddle:
-- last = newstream.pop()
-- i += 1
-- next = stream[i]
-- if isinstance(next, Group):
-- next = ThCompoundsPlugin.do_compounds(parser, next)
-- newstream.append(AndMaybeGroup([last, next]))
-- else:
-- newstream.append(t)
-- i += 1
--
-- return newstream
--
-- class And(Singleton):
-- expr = re.compile(u"&")
--
-- class Or(Singleton):
-- expr = re.compile(u"\|")
--
-- class AndNot(Singleton):
-- expr = re.compile(u"&!")
--
-- class AndMaybe(Singleton):
-- expr = re.compile(u"&~") # when using Arabic keyboard ~ is shift+Z
--
--class ThFieldsPlugin(Plugin):
-- """Adds the ability to specify the field of a clause using a colon.
--
-- This plugin is included in the default parser configuration.
-- """
--
-- def tokens(self):
-- return ((ThFieldsPlugin.Field, 0), )
--
-- def filters(self):
-- return ((ThFieldsPlugin.do_fieldnames, 100), )
--
-- @staticmethod
-- def do_fieldnames(parser, stream):
-- newstream = stream.empty()
-- newname = None
-- for i, t in enumerate(stream):
-- if isinstance(t, ThFieldsPlugin.Field):
-- valid = False
-- if i < len(stream) - 1:
-- next = stream[i+1]
-- if not isinstance(next, (White, ThFieldsPlugin.Field)):
-- newname = t.fieldname
-- valid = True
-- if not valid:
-- newstream.append(Word(t.fieldname, fieldname=parser.fieldname))
-- continue
--
-- if isinstance(t, Group):
-- t = ThFieldsPlugin.do_fieldnames(parser, t)
-- newstream.append(t.set_fieldname(newname))
-- newname = None
--
-- return newstream
--
-- class Field(Token):
-- expr = re.compile(u"(\w[\w\d]*):", re.U)
--
-- def __init__(self, fieldname):
-- self.fieldname = fieldname
--
-- def __repr__(self):
-- return "<%s:>" % self.fieldname
--
-- def set_fieldname(self, fieldname):
-- return self.__class__(fieldname)
--
-- @classmethod
-- def create(cls, parser, match):
-- return cls(match.group(1))
--
--class ThNotPlugin(Plugin):
-- """Adds the ability to negate a clause by preceding it with !.
--
-- This plugin is included in the default parser configuration.
-- """
--
-- def tokens(self):
-- return ((ThNotPlugin.Not, 0), )
--
-- def filters(self):
-- return ((ThNotPlugin.do_not, 800), )
--
-- @staticmethod
-- def do_not(parser, stream):
-- newstream = stream.empty()
-- notnext = False
-- for t in stream:
-- if isinstance(t, ThNotPlugin.Not):
-- notnext = True
-- continue
--
-- if notnext:
-- t = NotGroup([t])
-- newstream.append(t)
-- notnext = False
--
-- return newstream
--
-- class Not(Singleton):
-- expr = re.compile(u"!")
--
--class ThWildcardPlugin(Plugin):
-- """Adds the ability to specify wildcard queries by using asterisk and
-- question mark characters in terms. Note that these types can be very
-- performance and memory intensive. You may consider not including this
-- type of query.
--
-- This plugin is included in the default parser configuration.
-- """
--
-- def tokens(self):
-- return ((ThWildcardPlugin.Wild, 0), )
--
-- class Wild(BasicSyntax):
-- expr = re.compile(u"[^ \t\r\n*?]*(\\*|\\?|؟)\\S*")
-- qclass = query.Wildcard
--
-- def __repr__(self):
-- r = "%s:wild(%r)" % (self.fieldname, self.text)
-- if self.boost != 1.0:
-- r += "^%s" % self.boost
-- return r
--
-- @classmethod
-- def create(cls, parser, match):
-- return cls(match.group(0).replace(u'؟',u'?'))
--
--def ThMultifieldParser(schema=None):
-- plugins = (BoostPlugin, ThCompoundsPlugin, ThFieldsPlugin, GroupPlugin,
-- ThNotPlugin, PhrasePlugin, RangePlugin, SingleQuotesPlugin,
-- ThWildcardPlugin, FieldAliasPlugin({
-- u"kitab":(u"كتاب",),
-- u"title":(u"عنوان",),
-- u"tags":(u"وسوم",)})
-- )
-- p = MultifieldParser(("title","content",), schema=schema, plugins=plugins)
-- # to add a plugin use: p.add_plugin(XYZ)
-- return p
-+from whoosh.qparser import FieldAliasPlugin
-+from whooshSymbolicQParser import MultifieldSQParser
-
- class ExcerptFormatter(object):
- def __init__(self, between = "..."):
-@@ -304,7 +85,12 @@ class SearchEngine(BaseSearchEngine):
- )
- self.indexer=create_in(ix_dir,schema)
- #self.__ix_qparser = ThMultifieldParser(self.th, ("title","content",), schema=self.indexer.schema)
-- self.__ix_qparser = ThMultifieldParser(self.indexer.schema)
-+ self.__ix_qparser = MultifieldSQParser(("title","content",), self.indexer.schema)
-+ self.__ix_qparser.add_plugin(FieldAliasPlugin({
-+ u"kitab":(u"كتاب",),
-+ u"title":(u"عنوان",),
-+ u"tags":(u"وسوم",)})
-+ )
- #self.__ix_pre=whoosh.query.Prefix
- self.__ix_searcher= self.indexer.searcher()
-
-@@ -315,7 +101,9 @@ class SearchEngine(BaseSearchEngine):
- """
- return a Version-Release string if in index, otherwise return None
- """
-- d=self.__ix_searcher.document(kitab=unicode(makeId(name)))
-+ try: d=self.__ix_searcher.document(kitab=unicode(makeId(name)))
-+ except TypeError: return None
-+ except KeyError: return None
- if d: return d['vrr']
- return None
-
-diff --git a/Thawab/whooshSymbolicQParser.py b/Thawab/whooshSymbolicQParser.py
-new file mode 100644
-index 0000000..14ed6f9
---- /dev/null
-+++ b/Thawab/whooshSymbolicQParser.py
-@@ -0,0 +1,188 @@
-+# -*- coding: UTF-8 -*-
-+"""
-+
-+Copyright © 2010, Muayyad Alsadi <alsadi at ojuba.org>
-+
-+"""
-+
-+import sys, os, os.path, re
-+
-+from whoosh import query
-+from whoosh.qparser import *
-+
-+class SCompoundsPlugin(Plugin):
-+ """Adds the ability to use &, |, &~, and &! to specify
-+ query constraints.
-+ """
-+
-+ def tokens(self, parser):
-+ return ((SCompoundsPlugin.AndNot, -10), (SCompoundsPlugin.AndMaybe, -5), (SCompoundsPlugin.And, 0),
-+ (SCompoundsPlugin.Or, 0))
-+
-+ def filters(self, parser):
-+ return ((SCompoundsPlugin.do_compounds, 600), )
-+
-+ @staticmethod
-+ def do_compounds(parser, stream):
-+ newstream = stream.empty()
-+ i = 0
-+ while i < len(stream):
-+ t = stream[i]
-+ ismiddle = newstream and i < len(stream) - 1
-+ if isinstance(t, Group):
-+ newstream.append(SCompoundsPlugin.do_compounds(parser, t))
-+ elif isinstance(t, (SCompoundsPlugin.And, SCompoundsPlugin.Or)):
-+ if isinstance(t, SCompoundsPlugin.And):
-+ cls = AndGroup
-+ else:
-+ cls = OrGroup
-+
-+ if cls != type(newstream) and ismiddle:
-+ last = newstream.pop()
-+ rest = SCompoundsPlugin.do_compounds(parser, cls(stream[i+1:]))
-+ newstream.append(cls([last, rest]))
-+ break
-+
-+ elif isinstance(t, SCompoundsPlugin.AndNot):
-+ if ismiddle:
-+ last = newstream.pop()
-+ i += 1
-+ next = stream[i]
-+ if isinstance(next, Group):
-+ next = SCompoundsPlugin.do_compounds(parser, next)
-+ newstream.append(AndNotGroup([last, next]))
-+
-+ elif isinstance(t, SCompoundsPlugin.AndMaybe):
-+ if ismiddle:
-+ last = newstream.pop()
-+ i += 1
-+ next = stream[i]
-+ if isinstance(next, Group):
-+ next = SCompoundsPlugin.do_compounds(parser, next)
-+ newstream.append(AndMaybeGroup([last, next]))
-+ else:
-+ newstream.append(t)
-+ i += 1
-+
-+ return newstream
-+
-+ class And(Singleton):
-+ expr = re.compile(u"&")
-+
-+ class Or(Singleton):
-+ expr = re.compile(u"\|")
-+
-+ class AndNot(Singleton):
-+ expr = re.compile(u"&!")
-+
-+ class AndMaybe(Singleton):
-+ expr = re.compile(u"&~") # when using Arabic keyboard ~ is shift+Z
-+
-+class SFieldsPlugin(Plugin):
-+ """This plugin does not require an English field name, so that my field aliases work"""
-+
-+ def tokens(self, parser):
-+ return ((SFieldsPlugin.Field, 0), )
-+
-+ def filters(self, parser):
-+ return ((SFieldsPlugin.do_fieldnames, 100), )
-+
-+ @staticmethod
-+ def do_fieldnames(parser, stream):
-+ newstream = stream.empty()
-+ newname = None
-+ for i, t in enumerate(stream):
-+ if isinstance(t, SFieldsPlugin.Field):
-+ valid = False
-+ if i < len(stream) - 1:
-+ next = stream[i+1]
-+ if not isinstance(next, (White, SFieldsPlugin.Field)):
-+ newname = t.fieldname
-+ valid = True
-+ if not valid:
-+ newstream.append(Word(t.fieldname, fieldname=parser.fieldname))
-+ continue
-+
-+ if isinstance(t, Group):
-+ t = SFieldsPlugin.do_fieldnames(parser, t)
-+ newstream.append(t.set_fieldname(newname))
-+ newname = None
-+
-+ return newstream
-+
-+ class Field(Token):
-+ expr = re.compile(u"(\w[\w\d]*):", re.U)
-+
-+ def __init__(self, fieldname):
-+ self.fieldname = fieldname
-+
-+ def __repr__(self):
-+ return "<%s:>" % self.fieldname
-+
-+ def set_fieldname(self, fieldname):
-+ return self.__class__(fieldname)
-+
-+ @classmethod
-+ def create(cls, parser, match):
-+ return cls(match.group(1))
-+
-+class SNotPlugin(Plugin):
-+ """Adds the ability to negate a clause by preceding it with !.
-+ """
-+
-+ def tokens(self, parser):
-+ return ((SNotPlugin.Not, 0), )
-+
-+ def filters(self, parser):
-+ return ((SNotPlugin.do_not, 800), )
-+
-+ @staticmethod
-+ def do_not(parser, stream):
-+ newstream = stream.empty()
-+ notnext = False
-+ for t in stream:
-+ if isinstance(t, SNotPlugin.Not):
-+ notnext = True
-+ continue
-+
-+ if notnext:
-+ t = NotGroup([t])
-+ newstream.append(t)
-+ notnext = False
-+
-+ return newstream
-+
-+ class Not(Singleton):
-+ expr = re.compile(u"!")
-+
-+class SWildcardPlugin(Plugin):
-+ """Adds the ability to specify wildcard queries by using asterisk and
-+ question mark characters in terms. Note that these types can be very
-+ performance and memory intensive. You may consider not including this
-+ type of query.
-+ """
-+
-+ def tokens(self, parser):
-+ return ((SWildcardPlugin.Wild, 0), )
-+
-+ class Wild(BasicSyntax):
-+ expr = re.compile(u"[^ \t\r\n*?]*(\\*|\\?|؟)\\S*")
-+ qclass = query.Wildcard
-+
-+ def __repr__(self):
-+ r = "%s:wild(%r)" % (self.fieldname, self.text)
-+ if self.boost != 1.0:
-+ r += "^%s" % self.boost
-+ return r
-+
-+ @classmethod
-+ def create(cls, parser, match):
-+ return cls(match.group(0).replace(u'؟',u'?'))
-+
-+def MultifieldSQParser(fieldnames, schema=None, fieldboosts=None, **kwargs):
-+ plugins = (BoostPlugin, SCompoundsPlugin, SFieldsPlugin, GroupPlugin,
-+ SNotPlugin, PhrasePlugin, RangePlugin, SingleQuotesPlugin,
-+ SWildcardPlugin)
-+ p = MultifieldParser(fieldnames, schema, fieldboosts, plugins=plugins, **kwargs)
-+ return p
-+
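For reference on this second patch: upstream split the symbolic query parser out into the new module Thawab/whooshSymbolicQParser.py and rebuilt the search engine's parser on top of it, as the hunks above show. A minimal sketch of how the new parser is wired up (assuming whoosh 1.3.x and the patched Thawab on the import path; the schema here is a toy stand-in for the real index schema):

    from whoosh.fields import Schema, ID, TEXT
    from whoosh.qparser import FieldAliasPlugin
    from Thawab.whooshSymbolicQParser import MultifieldSQParser

    # Toy stand-in for the schema built in SearchEngine (assumption).
    schema = Schema(kitab=ID, title=TEXT, content=TEXT, tags=TEXT)

    # Parse over title and content by default, as whooshSearchEngine.py does.
    parser = MultifieldSQParser(("title", "content"), schema)

    # Arabic aliases for the field names, registered exactly as in the patch.
    parser.add_plugin(FieldAliasPlugin({
        u"kitab": (u"كتاب",),
        u"title": (u"عنوان",),
        u"tags": (u"وسوم",),
    }))

    # The symbolic plugins accept & (AND), | (OR), &! (ANDNOT), &~ (ANDMAYBE),
    # ! (NOT), and the Arabic ؟ as a wildcard question mark.
    q = parser.parse(u"عنوان:علم & !فقه")
    print(q)

The new try/except around __ix_searcher.document() in getVersion likewise appears to guard against lookups that whoosh 1.3.3 now rejects, returning None instead of raising.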
--
Packaging for Thawab