« Auto-éditer un wikilivre/addappendix/reconstruction et tests du script addappendix »: difference between versions

Ligne 32 :
#H Created : 220113 by GC
#H Updated : 220221 by GC for page ScliC
#O Organizational chart
#O -------------------------------
#P Programmer's notes
#P -------------------------------
 
VERSION=220310
TEXTDOMAIN=addappendix
TEXTDOMAINDIR="/usr/share/locale"
Ligne 39 ⟶ 43 :
export TEXTDOMAINDIR
 
#P . gettext for translation
. gettext.sh
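#P Illustrative note: with TEXTDOMAIN=addappendix, a call such as "$(gettext 'No parameter.')"
#P looks the msgid up in $TEXTDOMAINDIR/<lang>/LC_MESSAGES/addappendix.mo and falls back to the
#P English text when no translation is installed (assumption: a compiled .mo catalog exists).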
 
#O Script begin here
#O If the parameter is empty
if test -z "$1"
#O Then print the short syntax and exit -1
then
echo -n -e "\033[31m"
echo -n $"No parameter. addappendix [ <full URL of book> | ? | --v ]"
echo -e "\033[0m"
exit -1
fi
 
#O If first parameter is '?'
if [ "$1" = "?" ]
#O Then print syntax with examples and exit 0
then
echo -n -e "\033[32m"
echo $"Syntax: addappendix [ <full URL of book> | ? | --v ]"
echo $" Example 1 : addappendix https://en.wikibooks.org/wiki/Wikibooks:Collections/Guide_to_Unix"
echo $" Example 2 : addappendix https://fr.wikibooks.org/wiki/Wikilivres:Compilations/Faire_sa_fleur_de_sel"
Ligne 58 ⟶ 68 :
exit 0
fi
#O If first parameter is "--v"
if [ "$1" = "--v" ]
#O Then print addappendix version
then
echo -n -e "\033[32m"
Ligne 66 ⟶ 78 :
fi
#O *** First parameter analysis ***
#T echo "$1"
#O Test if the first parameter points to wikibooks.org/wiki
if echo $1 | grep wikibooks.org/wiki
then
Ligne 77 ⟶ 92 :
exit -1
fi
#O Check that the URL $1 exists
if wget --spider $1 2>/dev/null; then
echo -n -e "\033[32m"
Ligne 88 ⟶ 104 :
fi
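#P Illustrative note: with --spider, wget only checks that the URL answers (exit status 0)
#P and downloads nothing, so this test is a cheap existence check of the book page.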
 
#O Find the bookname
echo $1 | awk -F"/" ' { print $NF }' > bookname.txt
read Bookname<bookname.txt
echo; echo -n -e "\033[1;32m"
echo -n $"Book name : $Bookname"
echo -e "\033[0m"
Ligne 96 ⟶ 113 :
echo $1 | awk -F"/" ' { print $3 }' > site.txt
read Site<site.txt
echo; echo -n -e "\033[1;32m"
echo -n $"Site name : $Site"
echo -e "\033[0m"
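#P Illustrative example, for $1 = https://en.wikibooks.org/wiki/Wikibooks:Collections/Guide_to_Unix :
#P   awk -F"/" '{ print $NF }'  ->  Guide_to_Unix      (Bookname)
#P   awk -F"/" '{ print $3 }'   ->  en.wikibooks.org   (Site)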
Ligne 112 ⟶ 129 :
echo -e "\033[0m"
 
#O Create Bookname directory
install -d ~/Add_appendix/books/$Bookname
Projectdir=~/Add_appendix/books/$Bookname
#O Create temp directory in Workdir
Workdir=~/Add_appendix
mkdir -p $Workdir/temp
#O ============================================================================
##O Create the file bookname.suffix
#T ***********************
#O Create $Projectdir/resources/TMP to download
mkdir -p $Projectdir/resources/TMP
#O Download $1
cd $Projectdir/resources/TMP
rm -Rf $Projectdir/resources/TMP/* 2> /dev/null
Ligne 125 ⟶ 148 :
if [ "$Filename" = "filename.txt" ]; then echo $"line 113: \$Filename = filename.txt error, exit -1"; exit -1; fi
rm ../filename.txt
#O Go up to the 'resources' directory and rename 'TMP' to '$Filename'
cd ..
if test -e $Filename; then rm -R $Filename; fi
Ligne 132 ⟶ 156 :
mv TMP $Filename
cd $Filename
#T ls -al
cat $Filename|grep "<li><a href=">extract-li
cat extract-li | sed "s/title=\"/\n[[/g" | grep -v "<li><a href=" |sed "s/\">/]]\n/g"|grep -v "</a>\|Cat\|<div" >extract-li1
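#P Illustrative example (hypothetical input line): the sed/grep passes above turn
#P   <li><a href="/wiki/Guide_to_Unix/Commands" title="Guide to Unix/Commands">Commands</a></li>
#P into the wikilink
#P   [[Guide to Unix/Commands]]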
Ligne 138 ⟶ 163 :
cat extract-dd1 > $Bookname.$Suffix
cat extract-li1 >> $Bookname.$Suffix
#T echo "$Bookname.$Suffix = "
cp $Bookname.$Suffix $Projectdir/$Bookname.$Suffix
#T ***********************
if test -e $Projectdir/$Bookname.$Suffix
then
Ligne 146 ⟶ 173 :
cat $Projectdir/$Bookname.$Suffix
fi
#O ============================================================================
echo "Dowload $1"
#O Download the book in html form
#O Télécharger le site récursivement avec une profondeur infinie ( -linf ), \
#O convertit les liens pour une consultation en local ( -k ), \
#O rapatrie tous les fichiers nécessaires à l'affichage convenable d'une page HTML ( -p ) \
#O et renomme toutes les pages HTML avec l'extension .html ( -E )
echo; echo "Dowload $1"
wget -r -linf -k -p -E "$1" -o $Workdir/temp/wget-log-télécharger.txt
 
Ligne 161 ⟶ 194 :
then
{
echo $" Found Compiled page : $Projectdir/$Bookname.compiled"; echo
echo " Create $Projectdir/$Bookname.list with :"; echo " $Projectdir/$Bookname.compiled"; echo
cat "$Projectdir/$Bookname.compiled" | sed -f $Datasdir/content_cleaner.dat > $Projectdir/$Bookname.compiled.cleaned
cat "$Projectdir/$Bookname.compiled.cleaned" | grep -v '=' | sed "s/\[\[/https:\/\/$Site\/wiki\//g" | sed "s/\]\]//g" | grep "wiki" | tr ' ' '_' | cut -d '|' -f1 > $Projectdir/$Bookname.list
Ligne 168 ⟶ 201 :
}
fi
#T Print $Projectdir/$Bookname.prj
cat $Projectdir/$Bookname.prj
 
#O Download the complete book structure in project directory
cd $Projectdir
echo $"download all sub-directories of the book '$Bookname'"
wget -r -linf -k -p -E -i $Bookname.list -o $Workdir/temp/wget-log-download.txt
echo "----------"
#T Testspoint exit 0
#O Move the html pages into working sub-directories to document the pages and subpages
#O Create a local list of the downloaded directories: $Projectdir/$1.locale.list
echo "create the complete concatenated hierarchy of the directories of the book '$Bookname'"
cat $Projectdir/$Bookname.list | sed "s/https:\/\///g" | sed "s/\ /\\\ /g" | tr '\n' ',' > $Projectdir/$Bookname.locale.list
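#P Illustrative example: the https:// prefixes are stripped and the lines joined with commas,
#P   e.g. en.wikibooks.org/wiki/Guide_to_Unix,en.wikibooks.org/wiki/Guide_to_Unix/Commands,...
#P The first comma-separated field of this list becomes the main page path used below.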
Ligne 180 ⟶ 218 :
cat $Projectdir/$Bookname.locale.list
echo "----------"
#O Copy the html files to respective directories
#O Create a file with the pagename $Projectdir/$Bookname.mainPage
echo
echo $"Create the page from the local link to the main page, 'the book'"
cat $Projectdir/$Bookname.locale.list | sed "s/ /\\ /g" | cut -d ',' -f1 > $Projectdir/$Bookname.mainPage
echo "----------"
#O Initialize the variable $mainPage
read mainPage < $Projectdir/$Bookname.mainPage
echo "variable mainPage = $mainPage"
#T cat $Projet/$1.mainPage | awk -F"/" '{print NF}' > nbchamps
#T read NbChamps < nbchamps
#T echo "Variable NbChamps = $NbChamps"
echo "----------"
#O Create a file of the working directories to be created.
ls "$mainPage" | sed "s/.html//g" > $Projectdir/$Bookname.dirs
echo "sub-working-diectories : "
cat $Projectdir/$Bookname.dirs
echo "----------"
#O Copy the html pages and subpages in the respective directories
while read line
do
Ligne 208 ⟶ 254 :
rm source ; rm destination
 
#O ============================================================================
#O Create variable PageSclt
PageSclt=$Projectdir/$Bookname.sclt
#O File creation '$Bookname.sclt' and print the contents.
echo "----------"
echo $"$(gettext '= Appendix = ')" > $PageSclt
echo >> $PageSclt
 
#O Add <references />
echo $"== References == " >> $PageSclt
echo $"<references$(gettext '== References />== ')" >> $PageSclt
echo "$(gettext '<references /> ')" >> $PageSclt
echo >> $PageSclt
echo "<div style='page-break-before:always'></div>" >> $PageSclt
#O Add the link to printable book and to articles.
echo $"== Contents == " >> $PageSclt
echo "$(gettext '== Contents == ')" >> $PageSclt
echo "<div style='font-zize:85%'>" >> $PageSclt
cat $Projectdir/$Bookname.list | tr ' ' '_' | tr '\n' '%' | sed "s/%/\n\n/g" >> $PageSclt
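#P Illustrative example: spaces become underscores and each URL of the list is followed by a
#P blank line, so the Contents section shows one link per paragraph, e.g.
#P   https://en.wikibooks.org/wiki/Guide_to_Unix
#P
#P   https://en.wikibooks.org/wiki/Guide_to_Unix/Commands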
Ligne 224 ⟶ 275 :
 
#O Add the link to the source of this edition.
echo $"$(gettext '=== Source for this edition === ')" >> $PageSclt
echo "<div style='font-size:85%'>" >> $PageSclt
echo -n "https://" >> $PageSclt
cat $Projectdir/$Bookname.mainPage | sed "s/\\\ /_/g" >> $PageSclt
#P other version : cat $Projectdir/Bookname".list" | tr ' ' '_' | tr '\n' '%' | sed "s/%/%\n/g" | grep $1% | tr -d % >> $PageSclt
echo "</div>" >> $PageSclt
echo " " >> $PageSclt
echo "<div style='page-break-before:always'></div>" >> $PageSclt
 
#O Create section 'Article', 'Source', 'License', 'Contributors(?)'
echo $"== Articles Sources, and contributors == " >> $PageSclt
echo "$(gettext '== Articles Sources, and Contributors == ')" >> $PageSclt
#O add the text : style PediaPress or personalized.
#O The ''sources'' listed for each article provide more detailed licensing
#O information including the copyright status, the copyleft owner and the license conditions.
echo -n "<span style='font-size:85%'>" >> $PageSclt
echo $"$(gettext 'The ''sources'' listed for each article provide more detailed licensing information including the copyright status, the copyleft owner and the license conditions.</span> ')" >> $PageSclt
#O or, validate one or the other of these texts :
# echo $"The texts are available with their respective licenses, however other terms may apply.<br />See the terms of use for more details : <br />https://wikimediafoundation.org/wiki/Conditions_d'utilisation.</span>" >> $PageSclt
echo " " >> $PageSclt
Ligne 240 ⟶ 297 :
echo "----------"
#O Create or recreate the list-file $Projectdir/$1.pj
cat $Projectdir/$Bookname.list | awk -F"/" '{ print $NF }' > $Projectdir/$Bookname.pj
Pjlist=$Projectdir/$Bookname.pj
echo "Pjlist : "$PjList
 
#O While there are lines in the file $Pjlist,
while read line
do
#O Print the line read,
echo
echo $" line read = $line"
echo
#O Extract and copy all strings from the html file
#O $line.html in the file $line.str and add to screen
#T pwd
mkd -pws '**' "$line/$line.html" $Projectdir/$line/$line.tmp | tr ',' '\n' > $Projectdir/$line/$line.str
#T break
#O Create the documentation file of pages :
echo "*** References : articles, src, lic, contrib. "
#O Print article,
if [ "$line" != "$Bookname" ]
then
Ligne 259 ⟶ 325 :
cat $Projectdir/$line/$line.article
#O Print source,
echo -n $", ''source :'' https://"$Site"/w/index.php?oldid=" > $Projectdir/$line/$line.RevisionId
echo -n $", ''source : ''https://"$Site"/w/index.php?oldid=" > $Projectdir/$line/$line.RevisionId
cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e wgRevisionId | tr -d ':' | sed "s/\"/%/g" | cut -d'%' -f3 >> $Projectdir/$line/$line.RevisionId
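#P Illustrative example (hypothetical .str line): '"wgRevisionId":680042' is found by grep -n as
#P '57:"wgRevisionId":680042'; removing the ':' characters, replacing '"' with '%' and taking
#P field 3 leaves 680042, which is appended after the oldid= link above.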
if [ "$line" != "$Bookname" ]
Ligne 268 ⟶ 335 :
 
#P Footer license:
#P <li id="footer-info-copyright">Les textes sont disponibles sous <a href="https://creativecommons.org/licenses/by-sa/3.0/">license Creative Commons attribution partage à l’identique</a> ; d’autres termes peuvent s’appliquer.<br/>
#P Voyez les <a href="https://wikimediafoundation.org/wiki/Conditions_d'utilisation">termes d’utilisation</a> pour plus de détails.<br/></li>
#P
#P Print license :
#P <link rel="license" href="https://creativecommons.org/licenses/by-sa/3.0/"/>
 
 
#T echo -n $", ''licenseCopyright :'' " >> $Projectdir/$line/$lineArticleUn.licensetmp
#T cat ArticleUn.str | grep -n -m 1 -i -e license | sed "s/\"\//%\//g" | cut -d'%' -f2 |sed "s/\/\//https:\/\//g" >> ArticleUn.tmp
#O Print license :
echo -n $", ''license : ''" > $Projectdir/$line/$line.license
#T cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e license | sed "s/\"\//%\//g" | cut -d'%' -f4 >> $Projectdir/$line/$line.license
cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e license | sed "s/\"\//%\//g" | tr '"' '%' | cut -d'%' -f4 >> $Projectdir/$line/$line.license
#T cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e license | sed "s/\"\//%\//g" | cut -d'%' -f2 | sed "s/\/\//https:\/\//g" >> $Projectdir/$line/$line.license
if [ "$line" != "$Bookname" ]
then
Ligne 278 ⟶ 356 :
cat $Projectdir/$line/$line.license
#
#P Special case for the fr footer ##
cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e footer-info-copyright | sed "s/\"\//%\//g" | tr '"' '%' | cut -d'%' -f4 > $Projectdir/$line/$line.license
 
#O Author(s).
echo -n $", ''author : '' " > $Projectdir/$line/$line.author
echo -n $", ''authors : ''" > $Projectdir/$line/$line.author
cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e wgRelevantUserName | sed "s/\"/%/g" | cut -d'%' -f4 > tmp
if test -s tmp
Ligne 288 ⟶ 368 :
if wget --spider https://xtools.wmflabs.org/articleinfo/en.wikibooks.org/$line 2>/dev/null
then
echo $"$(gettext '. -see :') https://xtools.wmflabs.org/articleinfo/$Sitename/$line" >> $Projectdir/$line/$line.author
elif wget --spider https://xtools.wmflabs.org/articleinfo/$Sitename/$Bookname/$line 2>/dev/null
then
echo $"$(gettext '. -see :') https://xtools.wmflabs.org/articleinfo/$Sitename/$Bookname/$line" >> $Projectdir/$line/$line.author
else
cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e wgRelevantPageName | sed "s/\"/%/g" | cut -d'%' -f4 > tmp
if test -s tmp
then
#T echo "&action=history" >> tmp
echo -n $"$(gettext '. -see ''contributors'' in book, history page of ')" >> $Projectdir/$line/$line.author
cat tmp >> $Projectdir/$line/$line.author; rm tmp
fi
Ligne 311 ⟶ 392 :
echo >> $PageSclt
#O end of while.
done < $Pjlist
#O Add end of div and page break
echo "</div>" >> $PageSclt
echo "<div style='page-break-before:always'></div>" >> $PageSclt
#P
#P Creation of the page Bookname.scli (sources, contributors, licenses of the images)
#P
 
#O Initialize the header variable of the scli files.
Headscli=$Projectdir/$Bookname.scli
echo > $Headscli
#O Show the Headscli filename on the console
echo
echo -n "$Headscli"; echo $" english version"; echo
echo $"== Images sourcesSources licensesLicenses and contributorsContributors ==" > $Headscli
echo -n "<span style='font-size:85%'>"; echo $"The ''sources'' listed for each illustration provide more detailed licensing information, including copyright status, the holders of these rights and the license conditions.</span>" >> $Headscli
echo " " >> $Headscli
echo "<div style='font-size:72%'>" >> $Headscli
echo >> $Headscli
#T Show the content of file Headscli cat $Headscli; exit 0
 
#O ============================================================================
#O If the file $Projectdir/$Bookname/$Bookname.str exists, create the page $PageSclic containing the images in classic order
if test -e $Projectdir/$Bookname/$Bookname.str
then
#O Select lines containing 'fichier:', 'file:', 'image:' and create bookname.files
cat $Projectdir/$Bookname/$Bookname.str | grep -n -i -e fichier: -e file: -e image: > $Projectdir/$Bookname/$Bookname.files
#O Select lines containing 'fichier:', '.jpg', '.png', '.gif' and create bookname.picts
cat $Projectdir/$Bookname/$Bookname.str | grep -n -i -e fichier: -e .jpg -e .png -e .gif > $Projectdir/$Bookname/$Bookname.picts
#O Select, in bookname.files, the lines containing 'title', remove the </div> tag, cut on '>' and select the last field to create bookname.illustrations
cat $Projectdir/$Bookname/$Bookname.files | grep title |sed "s/<\/div>//g" | awk -F">" '{print $NF}' > $Projectdir/$Bookname/$Bookname.illustrations
#O In the .files file, with the separator '=', print each field followed by a newline, select the lines containing 'https', replace the character '"' with '!'
#O and select the third field, then select again the lines containing 'https', replace the character '>' with '!', remove '</a', then remove the character '!'
#O and create the .links file
cat $Projectdir/$Bookname/$Bookname.files | awk -F"=" '{for (i=1;i<=NF;i++) print $i "\n"}' | grep https | sed "s/\"/!/g" | cut -d '!' -f3 \
| grep https | tr '>' ! | sed "s/<\/a//g" |sed "s/!//g" > $Projectdir/$Bookname/$Bookname.links
#OF Télécharger les fichiers contenus dans la liste du fichier bookname.links
#O Download the files contained in the list of the bookname.links file
wget -P $Projectdir/$Bookname -r -linf -k -p -E -i $Projectdir/$Bookname/$Bookname.links
#O Copy the HTML files from ./commons.wikimedia.org/wiki into the current directory
cd $Bookname
if test -e commons.wikimedia.org; then cp -R commons.wikimedia.org/wiki/*.html . ; fi
#O html.list initialization
echo -n "" > html.list
if test -s $Projectdir/$Bookname/$Bookname.links
Ligne 345 ⟶ 446 :
echo $"$Projectdir/$Bookname/$Bookname.links is not empty"
#OF Tant qu'on lit des lignes dans le fichier .links, lire les images et les lister dans html.list
#O As long as there is a line in file html.links, read the line and copy it to html.list
while read line
do
Ligne 358 ⟶ 460 :
echo " *** References : image, src, lic, contrib."
#O As long as there is a line in file html.list extract illustrations, sources, licenses, authors(s)
while read line
do
Ligne 373 ⟶ 476 :
cat $line.title
 
echo -n $", '' source : ')''https://"$Site"/w/index.php?title= " > $line.source
echo $line | sed "s/.html//g" >> $line.source
cat $line.source >> $PageSclic
Ligne 383 ⟶ 486 :
cat $line.license
echo -n $", authorauthors : ''" > $line.authors
rm tmp
cat $line.str | grep -i -n -m1 -A 1 -e Author | grep -i -e user -e utilisateur -e auteur | tr '/' '\n' | grep -i -e user -e utilisateur -e auteur | cut -d '"' -f1 > tmp
Ligne 394 ⟶ 497 :
echo >> $PageSclic
done < html.list
#P Bottom of the page, before the new page
echo "</div>" >> $PageSclic
#T echo "$(gettext '{{Newpage}} ')" >> $PageSclic
echo "<div style='page-break-before:always'></div>" >> $PageSclic
#O end of test -e $Projectdir/$Bookname/$Bookname.str
else
echo -e "\033[31m"
echo $"Can not create $Projectdir/$Bookname/$Bookname.sclic. URL page of book is not found"
echo -e "\033[0m"
#O end of create PageSclic
fi
#O ============================================================================
#O Create variable Pagesclipco
Pagesclipco="$Projectdir/$Bookname.sclipco"
echo $Pagesclipco
#O Wikibooks sclipco personalized page initialization with the title Images sources, etc.
cat $Projectdir/$Bookname.scli > $Pagesclipco
#Test
cat $Pagesclipco
#O ============================================================================
#O Create an identification loop of the directories corresponding to the articles
#O As long as we can read the lines of the file $Projectdir/$Bookname.pj
while read pjline
do
#O If the line read is not $Bookname (name of the book)
#T echo "line read : " $line
if [ "$pjline" != "$Bookname" ]
#O Then:
then
#O Enter in the article directory,
cd $Projectdir/$pjline
#O Create image documentation files
#O Open the stream $Projectdir/$pjline/$pjline.str of the article and select
#O the character strings containing: File:, Image: and put them in the files
#O $Projectdir/$pjline/$pjline .files, .picts, .illustrations, .images, .links
cat $Projectdir/$pjline/$pjline.str | grep -n -i -e Fichier: -e file: -e image: | sed -f $RepCom/$Conversions > $Projectdir/$pjline/$pjline.files
cat $Projectdir/$pjline/$pjline.str | grep -n -i -e fichier: -e .jpg -e .png -e .gif | sed -f $RepCom/$Conversions> $Projectdir/$pjline/$pjline.picts
cat $Projectdir/$pjline/$pjline.files | grep title |sed "s/<\/div>//g" | awk -F">" '{print $NF}' > $Projectdir/$pjline/$pjline.illustrations
#T cat $Projectdir/$pjline/$pjline.files | awk -F"=" '{for (i=1;i<=NF;i++) print $i "\n"}' | grep https | sed "s/\"/!/g" | cut -d '!' -f3 | grep https | tr '>' ! | sed "s/<\/a//g" |sed "s/!//g" > $Projectdir/$pjline/$pjline.links
cat $Projectdir/$pjline/$pjline.files | awk -F"=" '{for (i=1;i<=NF;i++) print $i "\n"}' | grep https://$Site | sed "s/\"/!/g" | cut -d '!' -f2 > $Projectdir/$pjline/$pjline.images
#T break
#O Transform the links of the image file on wikibooks into an image file on commons
cat $Projectdir/$pjline/$pjline.images | sed "s/$Site/commons.wikimedia.org/g"| sed "s/Fichier/File/g" > $Projectdir/$pjline/$pjline.commonsimages
#O Download the image files from the wikimedia server.
#P Note: the -N option avoids downloading a file that is already up to date,
#P and does so without adding a numbering suffix.
#T wget -N -P $Projectdir/$pjline -i $Projectdir/$pjline/$pjline.images
wget -P $Projectdir/$pjline -r -linf -k -p -E -i $Projectdir/$pjline/$pjline.commonsimages
#T echo "*** Commonsimages ***"; cat $Projectdir/$pjline/$pjline.commonsimages; exit 0
#O Copy the downloaded images to the directory of the current article.
cp $Projectdir/$pjline/commons.wikimedia.org/wiki/*.html $Projectdir/$pjline/.
#O Initialize the commonshtml.doublons file with empty text.
echo -n "" > commonshtml.doublons
#O List the image files in the order of printing or display,
#O using the list $Projectdir/$pjline/$pjline.commons.images
#O As long as we can read lines in $Projectdir/$pjline/$pictline.images
while read pictline
do
#O Cut the lines at the carriage return, select the last field and add '.html'
#echo $pictline | awk -F"/" '{for (i=1;i<=NF;i+=2) print $i "\n"}' #| cut -d '%' -f1 | cut -d '.' -f1 > tmp
echo $pictline | awk -F"/" '{ print $NF".html"}' >> commonshtml.doublons
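#P Illustrative example (hypothetical link): https://commons.wikimedia.org/wiki/File:Example.jpg
#P becomes the local page name File:Example.jpg.html appended to commonshtml.doublons.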
#O Cut the duplicated lines and select even fields.
echo -n "" > commonshtml.list
awk 'BEGIN { FILENAME }
Ligne 435 ⟶ 570 :
# print "Fin"
} ' commonshtml.doublons
#O End of while $Projectdir/$pjline/$pjline.commonsimages
done < $Projectdir/$pjline/$pjline.commonsimages
 
#T Show html.list
#T echo "*** commonshtml.list ***"; cat commonshtml.list; exit 0
#O Copy article name in file $Bookname.sclipco
echo "'''Article : $pjline'''<br />" >> $Pagesclipco
echo "'''Article : $pjline'''"
Ligne 443 ⟶ 582 :
#P## Annexe version 'wikimedia commons' ##############################
 
#O As long as there are (local) links in the commonshtml.list image file
while read htmlline
do
#O Print the line read,
echo ""
echo ""
echo $" ---- line read = $htmlline --- "
echo ""
#O With mkd (software), select the character strings from the image file $htmlline
#O and copy them to $htmlline.co.str after replacing the character ',' with
#O a newline
mkd -pw '**' $htmlline $htmlline.tmp
cat $htmlline.tmp | tr ',' '\n' > $htmlline.co.str
#T echo "*** $htmlline.co.str : ***"; cat $htmlline.co.str; exit 0
#O images,
echo -n $"'''Illustration : '''" > $htmlline.co.title
cat $htmlline.co.str | grep wgTitle | cut -d '"' -f4 >> $htmlline.co.title
cat $htmlline.co.title >> $Pagesclipco
cat $htmlline.co.title
#T echo "*** $htmlline.co.title : ***"; cat $htmlline.co.title; exit 0
#O source,
echo -n $", ''source : ''https://commons.wikimedia.org/wiki/" > $htmlline.co.source
##echo -n ",''$(gettext 'source : ')''https://commons.wikimedia.org/wiki/" > $htmlline.co.source
echo -n $htmlline | sed "s/.html//g" | sed "s/.str//g" >> $htmlline.co.source
if [ "$Site" = "fr.wikibooks.org" ]; then echo "?uselang=fr" >> $htmlline.co.source
Ligne 464 ⟶ 612 :
cat $htmlline.co.source >> $Pagesclipco
cat $htmlline.co.source
#T echo "*** $htmlline.co.source : ***"; cat $htmlline.co.source; exit 0
#O license,
echo -n $", ''license : '' " > $htmlline.co.license
cat $htmlline.co.str | grep licensetpl_short | sed "s/<td>//g" | sed "s/<span class//g" | sed "s/<\/span>//g" | sed "s/style=\"display:none;\"//g" | tr '=' '\n' | grep licensetpl_short | awk -F">" '{print $NF}' >> $htmlline.co.license
cat $htmlline.co.license >> $Pagesclipco
cat $htmlline.co.license
#T echo "*** $htmlline.co.license : ***"; cat $htmlline.co.license; exit 0
 
#O authors.
rm -rf tmp
echo -n $", ''authorauthors : ''" > $htmlline.co.authors
#Test cat tmp; echo "$htmlline.co.authors"; exit -1
cat $htmlline.co.str | grep -i -n -m1 -A 1 -e Author -e Auteur | tr '/' '\n' | grep -i -e user -e utilisateur -e auteur -e author | cut -d '"' -f1 | grep -i -e user -e utilisateur -e auteur -e author > tmp
if test -s tmp; then echo ; else echo "-" > tmp; fi
Ligne 477 ⟶ 629 :
cat $htmlline.co.authors >> $Pagesclipco
cat $htmlline.co.authors
#O Finish the page $Pagesclipco
echo "" >> $Pagesclipco
#O End of as long as there are lines in commonshtml.list
done < commonshtml.list
#O End of 'if the line is not the name of book'.
fi
#O End of while line in $Bookname.pj
done < $Projectdir/$Bookname.pj
#O clean intermediate files
rm -rf tmp
#O End of page $Pagesclipco
echo "</div>" >> $Pagesclipco
#T echo "$(gettext ' {{Newpage}} ')" >> $Pagesclipco
echo "<div style='page-break-before:always'></div>" >> $Pagesclipco
#O ============================================================================
#O Creating Bookname.appendix
cat $Projectdir/$Bookname.sclt>$Projectdir/$Bookname.appendix
cat $Projectdir/$Bookname.sclipco>>$Projectdir/$Bookname.appendix
echo $"= License of articles and sofware =">>$Projectdir/$Bookname.appendix
echo "<div style='font-zize:85%'>">>$Projectdir/$Bookname.appendix
echo "Creative Commons Attribution Share Alike 3.0">>$Projectdir/$Bookname.appendix
echo "//creativecommons.org/licenses/by-sa/3.0/">>$Projectdir/$Bookname.appendix
echo >>$Projectdir/$Bookname.appendix
echo $"This appendix is created with addappendix software version $VERSION">>$Projectdir/$Bookname.appendix
echo "https://fr.wikibooks.org/wiki/Auto-éditer_un_wikilivre/addappendix">>$Projectdir/$Bookname.appendix
echo "</div>">>$Projectdir/$Bookname.appendix
echo "<div style='page-break-before:always'></div>">>$Projectdir/$Bookname.appendix
#O ============================================================================
#O Display file Bookname.appendix
echo;echo -e "\033[1;32mcopy and paste the text displayed and add after the book $Bookname.\033[0m"
cat $Projectdir/$Bookname.appendix
echo;echo -e "\033[1;32mcopy and paste the text displayed and add after the book.$Bookname\033[0m"
 
exit 0
#O addappendix script end
</syntaxhighlight>
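
A minimal usage sketch (assuming the script is installed in the PATH as ''addappendix'' and that wget, gettext and the mkd utility are available):

<syntaxhighlight lang="bash">
# Show the syntax and the built-in examples
addappendix ?

# Show the script version
addappendix --v

# Build the appendix files for a collection page (example URL taken from the script itself)
addappendix https://en.wikibooks.org/wiki/Wikibooks:Collections/Guide_to_Unix
</syntaxhighlight>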