<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:51:40 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-12335] mb_prealloc_table  table read/write code is racy</title>
                <link>https://jira.whamcloud.com/browse/LU-12335</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Preallocation table read/write code is racy. There is a&#160;&#160;possibility of accessing memory outside of allocated table.&lt;br/&gt;
This issue can be easy reproduced. I am not sure, I have to upload test that lead to test system to be crashed. So I put it here.&lt;br/&gt;
&#160;&lt;br/&gt;
dd if=/dev/zero of=&amp;lt;path_to_ldiskfs_partition&amp;gt; bs=1048576 count=1024 conv=fsync&lt;br/&gt;
cat &quot;32 64 128 256&quot; &amp;gt; /proc/fs/ldiskfs/&amp;lt;dev&amp;gt;/prealloc_table&lt;/p&gt;</description>
                <environment></environment>
        <key id="55742">LU-12335</key>
            <summary>mb_prealloc_table  table read/write code is racy</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="artem_blagodarenko">Artem Blagodarenko</assignee>
                                    <reporter username="artem_blagodarenko">Artem Blagodarenko</reporter>
                        <labels>
                    </labels>
                <created>Fri, 24 May 2019 07:25:18 +0000</created>
                <updated>Tue, 10 Sep 2019 14:00:42 +0000</updated>
                            <resolved>Tue, 10 Sep 2019 13:45:39 +0000</resolved>
                                                    <fixVersion>Lustre 2.13.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>3</watches>
                                                                            <comments>
                            <comment id="247624" author="gerrit" created="Fri, 24 May 2019 07:27:03 +0000"  >&lt;p&gt;Artem Blagodarenko (c17828@cray.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/34950&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/34950&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12335&quot; title=&quot;mb_prealloc_table  table read/write code is racy&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12335&quot;&gt;&lt;del&gt;LU-12335&lt;/del&gt;&lt;/a&gt; ldiskfs: fixed size preallocation table&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 411f62650ddb2ef569fbcd38891e0610ce44b0d3&lt;/p&gt;</comment>
                            <comment id="247630" author="adilger" created="Fri, 24 May 2019 08:37:45 +0000"  >&lt;p&gt;I guess the first question is whether the preallocation table settings are even useful?  We&apos;ve been carrying that patch for many years without submitting it upstream, because I&apos;m not sure whether it actually improves performance or functionality or is just overhead for patch maintenance?  Do you have a real test system where you could measure performance under load to see if removing &lt;tt&gt;ext4-prealloc.patch&lt;/tt&gt; improves or hurts performance or allocation behaviour?  &lt;/p&gt;

&lt;p&gt;If there is data that shows the patch improves performance noticeably under at least some non-Lustre workloads, and doesn&apos;t hurt performance, then it would make sense to push the patch upstream finally.&lt;/p&gt;</comment>
                            <comment id="247631" author="artem_blagodarenko" created="Fri, 24 May 2019 09:06:07 +0000"  >&lt;p&gt;I used preallocation table to solve allocator problems on aged systems(&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12103&quot; title=&quot;Improve block allocation for large partitions&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12103&quot;&gt;&lt;del&gt;LU-12103&lt;/del&gt;&lt;/a&gt;). There are two (and 3rd is bigalloc) solutions:&#160;&lt;/p&gt;
&lt;ul&gt;
	&lt;li&gt;A new block allocator algorithm has been developed (&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12103&quot; title=&quot;Improve block allocation for large partitions&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12103&quot;&gt;&lt;del&gt;LU-12103&lt;/del&gt;&lt;/a&gt;, send to upstream) by Cray to strategically skip low probability-of-match block groups while attempting to locate contiguous block groups when they likely won&#8217;t exist.&#160;&#160;&lt;/li&gt;
	&lt;li&gt;A novel script has been developed to dynamically adjust the block device pre-allocation table. This controls the number of pre-allocated blocks that are created for the request size in logarithmic increments starting at 4. As file systems fragment and become filled, some free block groups will simply not be available. Because of this, the block allocator should be tuned to address this on a regular basis.&#8239;&#160;&lt;/li&gt;
&lt;/ul&gt;


&lt;blockquote&gt;&lt;p&gt;Do you have a real test system where you could measure performance under load to see if removing &lt;tt&gt;ext4-prealloc.patch&lt;/tt&gt; improves or hurts performance or allocation behaviour?&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;We have test results for third solution. 140TB ldiskfs partition. Will share results to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12103&quot; title=&quot;Improve block allocation for large partitions&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12103&quot;&gt;&lt;del&gt;LU-12103&lt;/del&gt;&lt;/a&gt;. For second solution I have some synthetic test results:&lt;/p&gt;

&lt;p&gt;Here is bash script that build prealloc table based on mb_groups output:&#160;&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
[root@localhost cray-lustre]# cat build_prealloc.sh&#160;

#!/bin/bash&#160;
INPUT_FILE=$1&#160;

#columns from 9 to 21 show how many free fragments are available&#160;
&lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; index in {9..21}&#160;
&lt;span class=&quot;code-keyword&quot;&gt;do&lt;/span&gt;&#160;
&#160;&#160; PARAMS=&lt;span class=&quot;code-quote&quot;&gt;&quot;&lt;span class=&quot;code-quote&quot;&gt;&apos;NR&amp;gt;1 {&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (\$$index &amp;gt; 0) { print }}&apos;&lt;/span&gt;&quot;&lt;/span&gt;&#160;
   REGS=`eval awk &lt;span class=&quot;code-quote&quot;&gt;&quot;$PARAMS&quot;&lt;/span&gt; $INPUT_FILE | wc -l`&#160;
&#160;&#160; VAL=$((2 ** ($index-8)))&#160;
&#160;&#160;&#160;&#160; [ $REGS -gt 0 ] &amp;amp;&amp;amp; PREALLOC_TABLE=&lt;span class=&quot;code-quote&quot;&gt;&quot;$PREALLOC_TABLE $VAL&quot;&lt;/span&gt;&#160;
done&#160;
echo &lt;span class=&quot;code-quote&quot;&gt;&quot;prealloc table: $PREALLOC_TABLE&quot;&lt;/span&gt;&#160;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Example how to use it:&#160;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;cat /proc/fs/ldiskfs/loop1/mb_groups &amp;gt; table.dat&#160;
sh build_prealloc.sh table.dat &amp;gt; prealloc.txt&#160;
cat prealloc.txt &amp;gt; /proc/fs/ldiskfs/loop1/prealloc_table&#160;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&#160;&lt;br/&gt;
 Here is test output of my local testing on shared fsxfs-n24.img. I have extracted it and made two copies of this image for clean testing.&#160;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;tar -xf fsxfs-n24.img.tgz&#160;

cp fsxfs-n24.img fsxfs-n24-2.img&#160;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;And run test that 1) make large preallocation table 2) start dd 3) adjust preallocation table using script above 4) start dd&#160;&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
start_mb_stats()&#160;
{&#160;
&#160;&#160;&#160;&#160;&#160;&#160;&#160; echo &lt;span class=&quot;code-quote&quot;&gt;&quot;1&quot;&lt;/span&gt; &amp;gt; /sys/fs/ldiskfs/loop1/mb_stats&#160;
&#160;&#160;&#160;&#160;&#160;&#160;&#160; echo &lt;span class=&quot;code-quote&quot;&gt;&quot;0&quot;&lt;/span&gt; &amp;gt; /sys/fs/ldiskfs/loop1/mb_c1_threshold&#160;
&#160;&#160;&#160;&#160;&#160;&#160;&#160; echo &lt;span class=&quot;code-quote&quot;&gt;&quot;0&quot;&lt;/span&gt; &amp;gt; /sys/fs/ldiskfs/loop1/mb_c2_threshold&#160;
&#160;&#160;&#160;&#160;&#160;&#160;&#160; echo &lt;span class=&quot;code-quote&quot;&gt;&quot;0&quot;&lt;/span&gt; &amp;gt; /sys/fs/ldiskfs/loop1/mb_c3_threshold&#160;
}&#160;

mount_image()&#160;
{&#160;
&#160;&#160;&#160;&#160;&#160;&#160;&#160; local IMAGE=$1&#160;

&#160;&#160;&#160;&#160;&#160;&#160;&#160; mount -t xfs -o loop $IMAGE /mnt/fs2xfs/&#160;
&#160;&#160;&#160;&#160;&#160;&#160;&#160; mount -t ldiskfs -o loop /mnt/fs2xfs/n24.raw /mnt/fs2ost/&#160;
}&#160;

umount_image()&#160;
{&#160;
&#160;&#160;&#160;&#160;&#160;&#160;&#160; umount /mnt/fs2ost/&#160;
&#160;&#160;&#160;&#160;&#160;&#160;&#160; umount /mnt/fs2xfs/&#160;
}&#160;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;1. Set too large preallocation table and estimate write speed&#160;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LOAD=yes lustre/tests/llmount.sh&#160;
mount_image /lustre/mnt/staff/CAST-19722/fsxfs-n24.img&#160;
echo &quot;256 512 1024 2048 4096 8192 16384&quot; &amp;gt; /proc/fs/ldiskfs/loop1/prealloc_table&#160;
start_mb_stats&#160;
dd if=/dev/zero of=/mnt/fs2ost/O/foofile bs=1048576&#160; count=1024&#160; conv=fsync&#160;
cat /proc/fs/ldiskfs/loop1/mb_alloc&#160;
echo &quot;clear&quot; &amp;gt; /proc/fs/ldiskfs/loop1/mb_alloc&#160;
umount_image&#160;
mount_image /lustre/mnt/staff/CAST-19722/fsxfs-n24-2.img&#160;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;2. Adjust preallocation table based on mb_groups output&#160;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;cat /proc/fs/ldiskfs/loop1/mb_groups &amp;gt; $TMP/table.dat&#160;
sh build_prealloc.sh $TMP/table.dat &amp;gt; $TMP/prealloc.txt&#160;
cat $TMP/prealloc.txt &amp;gt; /proc/fs/ldiskfs/loop1/prealloc_table&#160;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;3. Estimate performance again&#160;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;dd if=/dev/zero of=/mnt/fs2ost/O/foofile bs=1048576&#160; count=1024&#160; conv=fsync&#160;
cat /proc/fs/ldiskfs/loop1/mb_alloc&#160;
echo &quot;clear&quot; &amp;gt; /proc/fs/ldiskfs/loop1/mb_alloc&#160;
umount_image&#160;

[root@localhost cray-lustre]# sh start.sh&#160;&#160;
Loading modules from /lustre/mnt/orig/cray-lustre/lustre/tests/..&#160;
detected 8 online CPUs by sysfs&#160;
libcfs will create CPU partition based on online CPUs&#160;
1024+0 records in&#160;
1024+0 records out&#160;
1073741824 bytes (1.1 GB) copied, 11.2427 s, 95.5 MB/s&#160;
mballoc: 262144 blocks 153 reqs (137 success)&#160;
mballoc: 2046 extents scanned, 127 goal hits, 1 2^N hits, 10 breaks, 0 lost&#160;
mballoc: (0, 0, 0) useless c(0,1,2) loops&#160;
mballoc: (0, 0, 0) skipped c(0,1,2) loops&#160;
1024+0 records in&#160;
1024+0 records out&#160;
1073741824 bytes (1.1 GB) copied, 9.22825 s, 116 MB/s&#160;

mballoc: 262143 blocks 243 reqs (240 success)&#160;
mballoc: 141 extents scanned, 113 goal hits, 129 2^N hits, 0 breaks, 0 lost&#160;
mballoc: (0, 0, 0) useless c(0,1,2) loops&#160;
mballoc: (0, 0, 0) skipped c(0,1,2) loops&#160;
[root@localhost cray-lustre]#&#160;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;test passed and shows ~18% speed improvement&#160;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@localhost cray-lustre]# sh start.sh&#160;
Loading modules from /lustre/mnt/orig/cray-lustre/lustre/tests/..&#160;
detected 8 online CPUs by sysfs&#160;
libcfs will create CPU partition based on online CPUs&#160;
1024+0 records in&#160;
1024+0 records out&#160;
1073741824 bytes (1.1 GB) copied, 11.2427 s, 95.5 MB/s&#160;

mballoc: 262144 blocks 153 reqs (137 success)&#160;
mballoc: 2046 extents scanned, 127 goal hits, 1 2^N hits, 10 breaks, 0 lost&#160;
mballoc: (0, 0, 0) useless c(0,1,2) loops&#160;
mballoc: (0, 0, 0) skipped c(0,1,2) loops&#160;
1024+0 records in&#160;
1024+0 records out&#160;
1073741824 bytes (1.1 GB) copied, 9.22825 s, 116 MB/s&#160;

mballoc: 262143 blocks 243 reqs (240 success)&#160;
mballoc: 141 extents scanned, 113 goal hits, 129 2^N hits, 0 breaks, 0 lost&#160;
mballoc: (0, 0, 0) useless c(0,1,2) loops&#160;
mballoc: (0, 0, 0) skipped c(0,1,2) loops&#160;
[root@localhost cray-lustre]#&#160;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;I am going to test this approach on a 140TB ldiskfs OST soon.&lt;/p&gt;</comment>
                            <comment id="247667" author="adilger" created="Fri, 24 May 2019 20:38:59 +0000"  >&lt;blockquote&gt;
&lt;p&gt;A novel script has been developed to dynamically adjust the block device pre-allocation table. This controls the number of pre-allocated blocks that are created for the request size in logarithmic increments starting at 4. As file systems fragment and become filled, some free block groups will simply not be available. Because of this, the block allocator should be tuned to address this on a regular basis.&#8239;&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;How hard would it be to include this into the mballoc code in the kernel directly?  Having a userspace tool is OK, but suffers from a number of limitations (hard for most users to configure, can die if there are problems (e.g. OOM), become CPU starved if the server is busy, needs extra scanning to learn current filesystem state and may become out of sync with the kernel).&lt;/p&gt;</comment>
                            <comment id="247698" author="artem_blagodarenko" created="Sat, 25 May 2019 05:31:10 +0000"  >&lt;p&gt;The algorithm is not difficult, as you can see in the script, so it can be added to the kernel. The most difficult decision is the moment when we need to reconfigure the preallocation table. With the script, the administrator decides when to change the configuration.&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;&amp;gt; (hard for most users to configure, can die if there are problems (e.g. OOM),&lt;/p&gt;

&lt;p&gt;My suggestion, add to cluster scripts and adjust automatically.&lt;/p&gt;

&lt;p&gt;&amp;gt;become CPU starved if the server is busy&lt;/p&gt;

&lt;p&gt;Preallocation table changing is quite fast operation, and with patch &lt;a href=&quot;https://review.whamcloud.com/34950&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/34950&lt;/a&gt;, safe and lockless.&lt;/p&gt;

&lt;p&gt;&amp;gt;needs extra scanning to learn current filesystem state and may become out of sync with the kernel).&lt;/p&gt;

&lt;p&gt;Scanning is made by kernel. Script use &quot;/proc/fs/ldiskfs/loop1/mb_groups&quot; output. This statistic is perfect data for such decision. Anyway, even in kernel we need use this statistic.&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="247902" author="gerrit" created="Wed, 29 May 2019 04:24:31 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/34950/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/34950/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12335&quot; title=&quot;mb_prealloc_table  table read/write code is racy&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12335&quot;&gt;&lt;del&gt;LU-12335&lt;/del&gt;&lt;/a&gt; ldiskfs: fixed size preallocation table&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: f15995b8e52bafabe55506ad2e12c8a64a373948&lt;/p&gt;</comment>
                            <comment id="247937" author="pjones" created="Wed, 29 May 2019 12:58:31 +0000"  >&lt;p&gt;Landed for 2.13&lt;/p&gt;</comment>
                            <comment id="249355" author="simmonsja" created="Sun, 16 Jun 2019 14:44:35 +0000"  >&lt;p&gt;This fix was only ever applied to RHEL platforms. SLES and Ubuntu lack this fix.&lt;/p&gt;</comment>
                            <comment id="254443" author="pjones" created="Tue, 10 Sep 2019 13:45:39 +0000"  >&lt;p&gt;As per recent LWG discussion this ticket should be marked as RESOLVED and anyone wanting to keep SLES/Ubuntu servers in sync should do that under a separate ticket&lt;/p&gt;</comment>
                            <comment id="254444" author="simmonsja" created="Tue, 10 Sep 2019 13:58:26 +0000"  >&lt;p&gt;Please make sure this is pushed to the ext4 maintainers.&lt;/p&gt;</comment>
                            <comment id="254445" author="artem_blagodarenko" created="Tue, 10 Sep 2019 14:00:42 +0000"  >&lt;p&gt;No need send this patch to ext4 upstream because no such bug there. Bug was introduced in our ldiskfs patches.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="55236">LU-12103</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="56278">LU-12511</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00gx3:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>