<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:47:17 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4953] /proc/fs/lustre/lmv/*/target_obds missing on second mounts</title>
                <link>https://jira.whamcloud.com/browse/LU-4953</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;If I double mount a client (for example with MOUNT_2=y llmount.sh) then the second mount point will have a missing /proc/fs/lustre/lmv/*/target_obds directory.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;u:~# umount /mnt/lustre
u:~# umount /mnt/lustre2
#### 
u:~# mount -o user_xattr,flock u@tcp:/lustre /mnt/lustre -t lustre
u:~# ls /proc/fs/lustre/lmv/
lustre-clilmv-ffff8801f6d64e60
u:~# ls /proc/fs/lustre/lmv/lustre-clilmv-ffff8801f6d64e60/
activeobd  desc_uuid  md_stats  numobd  placement  target_obd  target_obds  uuid
u:~# ls -l /proc/fs/lustre/lmv/lustre-clilmv-ffff8801f6d64e60/target_obds/
total 0
lrwxrwxrwx 1 root root 48 Apr 24 12:49 lustre-MDT0000-mdc-ffff8801f6d64e60 -&amp;gt; ../../../mdc/lustre-MDT0000-mdc-ffff8801f6d64e60
u:~# 
u:~# mount -o user_xattr,flock u@tcp:/lustre /mnt/lustre2 -t lustre
u:~# ls /proc/fs/lustre/lmv/
lustre-clilmv-ffff8801f487f778  lustre-clilmv-ffff8801f6d64e60
u:~# ls /proc/fs/lustre/lmv/lustre-clilmv-ffff8801f487f778
activeobd  desc_uuid  md_stats  numobd  placement  target_obd  uuid
u:~# 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;I see this when I unmount the second client mount:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[  263.149071] LustreError: 3790:0:(lmv_obd.c:761:lmv_disconnect()) /proc/fs/lustre/lmv/lustre-clilmv-ffff8802169bc688/target_obds missing
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="24378">LU-4953</key>
            <summary>/proc/fs/lustre/lmv/*/target_obds missing on second mounts</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bogl">Bob Glossman</assignee>
                                    <reporter username="jhammond">John Hammond</reporter>
                        <labels>
                            <label>lmv</label>
                            <label>lprocfs</label>
                            <label>mq313</label>
                            <label>patch</label>
                    </labels>
                <created>Thu, 24 Apr 2014 17:55:32 +0000</created>
                <updated>Fri, 23 May 2014 19:08:06 +0000</updated>
                            <resolved>Fri, 23 May 2014 19:08:06 +0000</resolved>
                                    <version>Lustre 2.6.0</version>
                                    <fixVersion>Lustre 2.6.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>3</watches>
                                                                            <comments>
                            <comment id="82481" author="jhammond" created="Fri, 25 Apr 2014 13:51:26 +0000"  >&lt;p&gt;James, can you comment here?&lt;/p&gt;</comment>
                            <comment id="82484" author="simmonsja" created="Fri, 25 Apr 2014 14:13:57 +0000"  >&lt;p&gt;Yep I can duplicate this problem. Let me look into it.&lt;/p&gt;</comment>
                            <comment id="83033" author="simmonsja" created="Thu, 1 May 2014 18:12:05 +0000"  >&lt;p&gt;Okay I figured it out. The problem is the use of procsym itself. I created procsym so you have top level symlinks to point to another top level proc entry. Examples are lod -&amp;gt; lov and osp -&amp;gt; osc. A procsym exists once per object type whereas in this case the target_obds directories exist for each object instance. So we can&apos;t use the procsym field in obd_type. The reason it showed up in lmv and not lov is because I was not checking in lov if proc_sym was already set. This means lov is leaking memory on each module unload. This bug also exists in the upstream Lustre client.&lt;/p&gt;

&lt;p&gt;Please note this problem also exists in the 2.4 and 2.5 code bases as well. In that case we have a single proc_dir_entry in each module even though multiple instances need to be created. Thus we have memory leaks with module unloads as well.&lt;/p&gt;</comment>
                            <comment id="83043" author="simmonsja" created="Thu, 1 May 2014 20:38:30 +0000"  >&lt;p&gt;Patch at &lt;a href=&quot;http://review.whamcloud.com/#/c/10192&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/10192&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;I take it back about upstream. It does handle this correctly. We will need patches for b2_5 and possibly b2_4 though.&lt;/p&gt;</comment>
                            <comment id="84031" author="simmonsja" created="Tue, 13 May 2014 17:56:35 +0000"  >&lt;p&gt;Patch landed for master. Need to make patch for b2_5 and possibly b2_4 as well.&lt;/p&gt;</comment>
                            <comment id="84807" author="jlevi" created="Fri, 23 May 2014 19:08:06 +0000"  >&lt;p&gt;Patch landed to Master. Flagged for backport to b2_5. Closing ticket for fix in Master. Please reopen if additional work (other than back porting) is needed.&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwky7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>13705</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>