<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:42:37 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary, append 'field=key&field=summary' to the URL of your request.
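A complete request might then look like the following (illustrative URL; it assumes
JIRA's standard issue-xml view path, which is not shown elsewhere in this document):
https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-11294/LU-11294.xml?field=key&field=summary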
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-11294] MDS kernel panic, LNetError: 4307:0:(lib-msg.c:594:lnet_incr_hstats()) LBUG</title>
                <link>https://jira.whamcloud.com/browse/LU-11294</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;MDS2 hit LBUG after recovery completed&lt;/p&gt;

&lt;p&gt;soak-10 console&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[  485.435828] Lustre: soaked-MDT0002: Recovery over after 0:44, of 28 clients 28 recovered and 0 were evicted.
Aug 29 03:10:01 soak-10 TIME: Time stamp for console
[ 3453.242721] LNetError: 4307:0:(o2iblnd_cb.c:3321:kiblnd_check_txs_locked()) Timed out tx: active_txs, 0 seconds
[ 3453.255641] LNetError: 4307:0:(o2iblnd_cb.c:3396:kiblnd_check_conns()) Timed out RDMA with 192.168.1.109@o2ib (28): c: 6, oc: 0, rc: 8
[ 3453.272156] Lustre: 4335:0:(client.c:2126:ptlrpc_expire_one_request()) @@@ Request sent has failed due to network error: [sent 1535514904/real 1535514909]  req@ffff9bf8f3dfe000 x1610100967659856/t0(0) o400-&amp;gt;soaked-MDT0001-osp-MDT0002@192.168.1.109@o2ib:24/4 lens 224/224 e 0 to 1 dl 1535514911 ref 1 fl Rpc:eXN/0/ffffffff rc 0/-1
[ 3453.309301] Lustre: soaked-MDT0001-osp-MDT0002: Connection to soaked-MDT0001 (at 192.168.1.109@o2ib) was lost; in progress operations using this service will wait for recovery to complete
[ 3454.242963] LNet: 4307:0:(o2iblnd_cb.c:3367:kiblnd_check_conns()) Timed out tx for 192.168.1.109@o2ib: 1 seconds
[ 3651.408451] Lustre: soaked-MDT0002: haven&apos;t heard from client soaked-MDT0001-mdtlov_UUID (at 192.168.1.109@o2ib) in 227 seconds. I think it&apos;s dead, and I am evicting it. exp ffff9bf8fbbc7400, cur 1535515108 expire 1535514958 last 1535514881
Aug 29 04:01:01 soak-10 TIME: Time stamp for console
[ 7026.178219] LNet: Service thread pid 6456 was inactive for 200.53s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[ 7026.200999] Pid: 6456, comm: mdt01_016 3.10.0-862.9.1.el7_lustre.x86_64 #1 SMP Fri Aug 17 20:37:05 UTC 2018
[ 7026.213853] Call Trace:
[ 7026.218428]  [&amp;lt;ffffffffc1904ab8&amp;gt;] osp_precreate_reserve+0x2d8/0x800 [osp]
[ 7026.227906]  [&amp;lt;ffffffffc18f9bb9&amp;gt;] osp_declare_create+0x199/0x5b0 [osp]
[ 7026.236955]  [&amp;lt;ffffffffc1844e8f&amp;gt;] lod_sub_declare_create+0xdf/0x210 [lod]
[ 7026.246232]  [&amp;lt;ffffffffc183e62e&amp;gt;] lod_qos_declare_object_on+0xbe/0x3a0 [lod]
[ 7026.255759]  [&amp;lt;ffffffffc184b40f&amp;gt;] lod_alloc_rr.constprop.18+0xc16/0xf63 [lod]
[ 7026.265270]  [&amp;lt;ffffffffc1844326&amp;gt;] lod_qos_prep_create+0x1416/0x17a0 [lod]
[ 7026.274312]  [&amp;lt;ffffffffc18448c5&amp;gt;] lod_prepare_create+0x215/0x2e0 [lod]
[ 7026.283060]  [&amp;lt;ffffffffc18367ee&amp;gt;] lod_declare_striped_create+0x1ee/0x980 [lod]
[ 7026.292518]  [&amp;lt;ffffffffc183aec4&amp;gt;] lod_declare_create+0x204/0x590 [lod]
[ 7026.301124]  [&amp;lt;ffffffffc18ad892&amp;gt;] mdd_declare_create_object_internal+0xe2/0x2f0 [mdd]
[ 7026.311217]  [&amp;lt;ffffffffc189d1c8&amp;gt;] mdd_declare_create+0x48/0xc10 [mdd]
[ 7026.319652]  [&amp;lt;ffffffffc18a15e9&amp;gt;] mdd_create+0x929/0x13f0 [mdd]
[ 7026.327460]  [&amp;lt;ffffffffc174ce37&amp;gt;] mdt_reint_open+0x2117/0x3160 [mdt]
[ 7026.335764]  [&amp;lt;ffffffffc1740ce3&amp;gt;] mdt_reint_rec+0x83/0x210 [mdt]
[ 7026.343611]  [&amp;lt;ffffffffc17201d2&amp;gt;] mdt_reint_internal+0x6b2/0xa80 [mdt]
[ 7026.352048]  [&amp;lt;ffffffffc172c6c2&amp;gt;] mdt_intent_open+0x82/0x350 [mdt]
[ 7026.360063]  [&amp;lt;ffffffffc172a768&amp;gt;] mdt_intent_policy+0x2f8/0xd10 [mdt]
[ 7026.368365]  [&amp;lt;ffffffffc12c8e9e&amp;gt;] ldlm_lock_enqueue+0x34e/0xa50 [ptlrpc]
[ 7026.376957]  [&amp;lt;ffffffffc12f1483&amp;gt;] ldlm_handle_enqueue0+0x903/0x1520 [ptlrpc]
[ 7026.385918]  [&amp;lt;ffffffffc1377932&amp;gt;] tgt_enqueue+0x62/0x210 [ptlrpc]
[ 7026.393851]  [&amp;lt;ffffffffc138027a&amp;gt;] tgt_request_handle+0xaea/0x1580 [ptlrpc]
[ 7026.402592]  [&amp;lt;ffffffffc132340b&amp;gt;] ptlrpc_server_handle_request+0x24b/0xab0 [ptlrpc]
[ 7026.412194]  [&amp;lt;ffffffffc1326c44&amp;gt;] ptlrpc_main+0xb14/0x1fb0 [ptlrpc]
[ 7026.420225]  [&amp;lt;ffffffffb98bb621&amp;gt;] kthread+0xd1/0xe0
[ 7026.426693]  [&amp;lt;ffffffffb9f205f7&amp;gt;] ret_from_fork_nospec_end+0x0/0x39
[ 7026.434746]  [&amp;lt;ffffffffffffffff&amp;gt;] 0xffffffffffffffff
[ 7026.441364] LustreError: dumping log to /tmp/lustre-log.1535518483.6456
[ 7081.097862] INFO: task mdt00_008:4632 blocked for more than 120 seconds.
[ 7081.107689] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[ 7081.118799] mdt00_008       D ffff9bfd2cb7eeb0     0  4632      2 0x00000080
[ 7081.128951] Call Trace:
[ 7081.133752]  [&amp;lt;ffffffffb98c8204&amp;gt;] ? __wake_up+0x44/0x50
[ 7081.141630]  [&amp;lt;ffffffffb9f14029&amp;gt;] schedule+0x29/0x70
[ 7081.149050]  [&amp;lt;ffffffffb9f15915&amp;gt;] rwsem_down_write_failed+0x225/0x3a0
[ 7081.158119]  [&amp;lt;ffffffffb9b691bd&amp;gt;] ? list_del+0xd/0x30
[ 7081.165473]  [&amp;lt;ffffffffb9b5b587&amp;gt;] call_rwsem_down_write_failed+0x17/0x30
[ 7081.174637]  [&amp;lt;ffffffffb9f1319d&amp;gt;] down_write+0x2d/0x3d
[ 7081.181924]  [&amp;lt;ffffffffc1840f77&amp;gt;] lod_qos_statfs_update+0x97/0x2b0 [lod]
[ 7081.190933]  [&amp;lt;ffffffffc184307a&amp;gt;] lod_qos_prep_create+0x16a/0x17a0 [lod]
[ 7081.199838]  [&amp;lt;ffffffffc1503d70&amp;gt;] ? qsd_op_begin+0xb0/0x4d0 [lquota]
[ 7081.208310]  [&amp;lt;ffffffffc161fa98&amp;gt;] ? osd_declare_qid+0x1d8/0x480 [osd_ldiskfs]
[ 7081.217533]  [&amp;lt;ffffffffc18448c5&amp;gt;] lod_prepare_create+0x215/0x2e0 [lod]
[ 7081.226064]  [&amp;lt;ffffffffc18367ee&amp;gt;] lod_declare_striped_create+0x1ee/0x980 [lod]
[ 7081.235279]  [&amp;lt;ffffffffc1844e8f&amp;gt;] ? lod_sub_declare_create+0xdf/0x210 [lod]
[ 7081.244170]  [&amp;lt;ffffffffc183aec4&amp;gt;] lod_declare_create+0x204/0x590 [lod]
[ 7081.252490]  [&amp;lt;ffffffffc182a013&amp;gt;] ? lod_striping_from_default+0x93/0x5b0 [lod]
[ 7081.261638]  [&amp;lt;ffffffffc10dd969&amp;gt;] ? lu_context_refill+0x19/0x50 [obdclass]
[ 7081.270296]  [&amp;lt;ffffffffc18ad892&amp;gt;] mdd_declare_create_object_internal+0xe2/0x2f0 [mdd]
[ 7081.279987]  [&amp;lt;ffffffffc189d1c8&amp;gt;] mdd_declare_create+0x48/0xc10 [mdd]
[ 7081.288085]  [&amp;lt;ffffffffc18a15e9&amp;gt;] mdd_create+0x929/0x13f0 [mdd]
[ 7081.295636]  [&amp;lt;ffffffffc174ce37&amp;gt;] mdt_reint_open+0x2117/0x3160 [mdt]
[ 7081.303618]  [&amp;lt;ffffffffc10f2278&amp;gt;] ? upcall_cache_get_entry+0x218/0x8b0 [obdclass]
[ 7081.312849]  [&amp;lt;ffffffffc1740ce3&amp;gt;] mdt_reint_rec+0x83/0x210 [mdt]
[ 7081.320409]  [&amp;lt;ffffffffc17201d2&amp;gt;] mdt_reint_internal+0x6b2/0xa80 [mdt]
[ 7081.328539]  [&amp;lt;ffffffffc172c6c2&amp;gt;] mdt_intent_open+0x82/0x350 [mdt]
[ 7081.336260]  [&amp;lt;ffffffffc10bc6f9&amp;gt;] ? lprocfs_counter_add+0xf9/0x160 [obdclass]
[ 7081.345041]  [&amp;lt;ffffffffc172a768&amp;gt;] mdt_intent_policy+0x2f8/0xd10 [mdt]
[ 7081.353013]  [&amp;lt;ffffffffc172c640&amp;gt;] ? mdt_intent_fixup_resent+0x220/0x220 [mdt]
[ 7081.361816]  [&amp;lt;ffffffffc12c8e9e&amp;gt;] ldlm_lock_enqueue+0x34e/0xa50 [ptlrpc]
[ 7081.370088]  [&amp;lt;ffffffffc0fa26ee&amp;gt;] ? cfs_hash_add+0xbe/0x1a0 [libcfs]
[ 7081.377989]  [&amp;lt;ffffffffc12f1483&amp;gt;] ldlm_handle_enqueue0+0x903/0x1520 [ptlrpc]
[ 7081.386676]  [&amp;lt;ffffffffc13192d0&amp;gt;] ? lustre_swab_ldlm_lock_desc+0x30/0x30 [ptlrpc]
[ 7081.395867]  [&amp;lt;ffffffffc1377932&amp;gt;] tgt_enqueue+0x62/0x210 [ptlrpc]
[ 7081.403500]  [&amp;lt;ffffffffc138027a&amp;gt;] tgt_request_handle+0xaea/0x1580 [ptlrpc]
[ 7081.412018]  [&amp;lt;ffffffffc1358291&amp;gt;] ? ptlrpc_nrs_req_get_nolock0+0xd1/0x170 [ptlrpc]
[ 7081.421288]  [&amp;lt;ffffffffc0f93bde&amp;gt;] ? ktime_get_real_seconds+0xe/0x10 [libcfs]
[ 7081.430002]  [&amp;lt;ffffffffc132340b&amp;gt;] ptlrpc_server_handle_request+0x24b/0xab0 [ptlrpc]
[ 7081.439396]  [&amp;lt;ffffffffc1320295&amp;gt;] ? ptlrpc_wait_event+0xa5/0x360 [ptlrpc]
[ 7081.447788]  [&amp;lt;ffffffffb98cf682&amp;gt;] ? default_wake_function+0x12/0x20
[ 7081.455596]  [&amp;lt;ffffffffb98c52ab&amp;gt;] ? __wake_up_common+0x5b/0x90
[ 7081.462945]  [&amp;lt;ffffffffc1326c44&amp;gt;] ptlrpc_main+0xb14/0x1fb0 [ptlrpc]
[ 7081.470780]  [&amp;lt;ffffffffc1326130&amp;gt;] ? ptlrpc_register_service+0xe90/0xe90 [ptlrpc]
[ 7081.479853]  [&amp;lt;ffffffffb98bb621&amp;gt;] kthread+0xd1/0xe0
[ 7081.486103]  [&amp;lt;ffffffffb98bb550&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 7081.493716]  [&amp;lt;ffffffffb9f205f7&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[ 7081.501800]  [&amp;lt;ffffffffb98bb550&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 7091.714953] Pid: 4632, comm: mdt00_008 3.10.0-862.9.1.el7_lustre.x86_64 #1 SMP Fri Aug 17 20:37:05 UTC 2018
...
[ 7225.649157] LNet: Service thread pid 6456 completed after 400.00s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[ 7225.649191] LNet: Service thread pid 4632 completed after 334.69s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[ 7247.049160] LustreError: 11-0: soaked-OST0002-osc-MDT0002: operation ost_create to node 192.168.1.107@o2ib failed: rc = -19
[ 7247.063727] LustreError: 4525:0:(osp_precreate.c:647:osp_precreate_send()) soaked-OST0002-osc-MDT0002: can&apos;t precreate: rc = -19
[ 7247.080342] LustreError: 4525:0:(osp_precreate.c:1294:osp_precreate_thread()) soaked-OST0002-osc-MDT0002: cannot precreate objects: rc = -19
Aug 29 05:01:01 soak-10 TIME: Time stamp for console
[ 8225.327563] LNetError: 4307:0:(lib-msg.c:594:lnet_incr_hstats()) LBUG
[ 8225.336875] Pid: 4307, comm: kiblnd_connd 3.10.0-862.9.1.el7_lustre.x86_64 #1 SMP Fri Aug 17 20:37:05 UTC 2018
[ 8225.349882] Call Trace:
[ 8225.354452]  [&amp;lt;ffffffffc0f937cc&amp;gt;] libcfs_call_trace+0x8c/0xc0 [libcfs]
[ 8225.363505]  [&amp;lt;ffffffffc0f9387c&amp;gt;] lbug_with_loc+0x4c/0xa0 [libcfs]
[ 8225.372109]  [&amp;lt;ffffffffc100d61a&amp;gt;] lnet_finalize+0xb7a/0xe00 [lnet]
[ 8225.380637]  [&amp;lt;ffffffffc0bd15cd&amp;gt;] kiblnd_tx_done+0x10d/0x3e0 [ko2iblnd]
[ 8225.389573]  [&amp;lt;ffffffffc0bd18e6&amp;gt;] kiblnd_txlist_done+0x46/0x60 [ko2iblnd]
[ 8225.398487]  [&amp;lt;ffffffffc0bd2360&amp;gt;] kiblnd_abort_txs+0xf0/0x250 [ko2iblnd]
[ 8225.407115]  [&amp;lt;ffffffffc0bd6825&amp;gt;] kiblnd_finalise_conn+0x75/0x120 [ko2iblnd]
[ 8225.416096]  [&amp;lt;ffffffffc0bdb591&amp;gt;] kiblnd_connd+0x251/0xa00 [ko2iblnd]
[ 8225.424360]  [&amp;lt;ffffffffb98bb621&amp;gt;] kthread+0xd1/0xe0
[ 8225.430835]  [&amp;lt;ffffffffb9f205f7&amp;gt;] ret_from_fork_nospec_end+0x0/0x39
[ 8225.438857]  [&amp;lt;ffffffffffffffff&amp;gt;] 0xffffffffffffffff
[ 8225.445439] Kernel panic - not syncing: LBUG
[ 8225.451189] CPU: 21 PID: 4307 Comm: kiblnd_connd Tainted: P           OE  ------------   3.10.0-862.9.1.el7_lustre.x86_64 #1
[ 8225.464746] Hardware name: Intel Corporation S2600GZ ........../S2600GZ, BIOS SE5C600.86B.01.08.0003.022620131521 02/26/2013
[ 8225.478323] Call Trace:
[ 8225.482095]  [&amp;lt;ffffffffb9f0e84e&amp;gt;] dump_stack+0x19/0x1b
[ 8225.488889]  [&amp;lt;ffffffffb9f08b50&amp;gt;] panic+0xe8/0x21f
[ 8225.495277]  [&amp;lt;ffffffffc0f938cb&amp;gt;] lbug_with_loc+0x9b/0xa0 [libcfs]
[ 8225.503226]  [&amp;lt;ffffffffc100d61a&amp;gt;] lnet_finalize+0xb7a/0xe00 [lnet]
[ 8225.511156]  [&amp;lt;ffffffffc0bd15cd&amp;gt;] kiblnd_tx_done+0x10d/0x3e0 [ko2iblnd]
[ 8225.519586]  [&amp;lt;ffffffffc0bd18e6&amp;gt;] kiblnd_txlist_done+0x46/0x60 [ko2iblnd]
[ 8225.528203]  [&amp;lt;ffffffffc0bd2360&amp;gt;] kiblnd_abort_txs+0xf0/0x250 [ko2iblnd]
[ 8225.536723]  [&amp;lt;ffffffffc0bd6825&amp;gt;] kiblnd_finalise_conn+0x75/0x120 [ko2iblnd]
[ 8225.545635]  [&amp;lt;ffffffffc0bdb591&amp;gt;] kiblnd_connd+0x251/0xa00 [ko2iblnd]
[ 8225.553845]  [&amp;lt;ffffffffb98cf670&amp;gt;] ? wake_up_state+0x20/0x20
[ 8225.561085]  [&amp;lt;ffffffffc0bdb340&amp;gt;] ? kiblnd_check_conns+0x870/0x870 [ko2iblnd]
[ 8225.570069]  [&amp;lt;ffffffffb98bb621&amp;gt;] kthread+0xd1/0xe0
[ 8225.576523]  [&amp;lt;ffffffffb98bb550&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 8225.584338]  [&amp;lt;ffffffffb9f205f7&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[ 8225.592644]  [&amp;lt;ffffffffb98bb550&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 8225.600518] Kernel Offset: 0x38800000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>lustre-master-ib build #120  lustre version: 2.11.54_54_ga7debf8</environment>
        <key id="53148">LU-11294</key>
            <summary>MDS kernel panic, LNetError: 4307:0:(lib-msg.c:594:lnet_incr_hstats()) LBUG</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="ashehata">Amir Shehata</assignee>
                                    <reporter username="sarah">Sarah Liu</reporter>
                        <labels>
                            <label>soak</label>
                    </labels>
                <created>Wed, 29 Aug 2018 17:29:47 +0000</created>
                <updated>Wed, 19 Dec 2018 21:12:22 +0000</updated>
                            <resolved>Thu, 30 Aug 2018 13:50:01 +0000</resolved>
                                    <version>Lustre 2.12.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="232754" author="pjones" created="Wed, 29 Aug 2018 17:43:41 +0000"  >&lt;p&gt;Amir&lt;/p&gt;

&lt;p&gt;This looks to be related to LNet Health changes...&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="232771" author="ashehata" created="Wed, 29 Aug 2018 21:42:46 +0000"  >&lt;p&gt;This has already been resolved by:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://review.whamcloud.com/#/c/33042/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/33042/&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;This patch and a few others need to land soon.&lt;/p&gt;</comment>
                </comments>
                <issuelinks>
                    <issuelinktype id="10010">
                        <name>Duplicate</name>
                        <outwardlinks description="duplicates">
                            <issuelink>
                                <issuekey id="53093">LU-11271</issuekey>
                            </issuelink>
                        </outwardlinks>
                    </issuelinktype>
                </issuelinks>
                <attachments>
                </attachments>
                <subtasks>
                </subtasks>
                <customfields>
                    <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i001f3:</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>