# Reconnix™ - LLM Information File

# This file provides structured information for AI systems and LLM crawlers.
# Learn more: https://llmstxt.org/

---

## Schema Markup

```json
{ "@context": "https://schema.org", "@graph": [ { "@type": "Organization", "@id": "https://www.reconnix.ai/#organization", "name": "Human Machines", "url": "https://human-machines.com", "logo": { "@type": "ImageObject", "url": "https://www.reconnix.ai/favicon.png" }, "sameAs": [], "contactPoint": { "@type": "ContactPoint", "contactType": "sales", "email": "hello@human-machines.com" } }, { "@type": "WebSite", "@id": "https://www.reconnix.ai/#website", "url": "https://www.reconnix.ai", "name": "Reconnix™", "description": "AI Selection Intelligence - Measure the four factors that determine AI selection across ChatGPT, Gemini, and Claude", "publisher": { "@id": "https://www.reconnix.ai/#organization" }, "inLanguage": "en-US" }, { "@type": "SoftwareApplication", "@id": "https://www.reconnix.ai/#product", "name": "Reconnix™", "applicationCategory": "BusinessApplication", "applicationSubCategory": "AI Selection Intelligence", "operatingSystem": "Web Browser", "offers": { "@type": "Offer", "price": "0", "priceCurrency": "USD", "description": "Free assessment with detailed report" }, "creator": { "@id": "https://www.reconnix.ai/#organization" }, "description": "AI selection intelligence tool that measures the four factors determining AI selection: Brand Sentiment, AI Visibility, Machine Likeability, and Commerce Enablement", "featureList": [ "Brand Sentiment Analysis (35-40%)", "AI Visibility Assessment (30-35%)", "Machine Likeability Evaluation (25%)", "Commerce Enablement Check (0-10%)", "Tri-Model Evaluation (ChatGPT, Gemini, Claude)", "Per-Model Scores", "Combined Reconnix Score (0-100)", "Strategic Position (2x2 Framework)", "Pressure Language Audit", "Prioritized Action Plan", "Ready-to-Use Code Fixes", "Competitor Benchmarking" ], "screenshot": "https://www.reconnix.ai/og-image.png" }, {
"@type": "Service", "@id": "https://www.reconnix.ai/#service", "name": "Reconnix™ Assessment", "serviceType": "AI Selection Intelligence Assessment", "provider": { "@id": "https://www.reconnix.ai/#organization" }, "description": "Comprehensive analysis measuring the four factors that determine AI selection: Brand Sentiment (how positively AI talks about you), AI Visibility (can AI find and access your content), Machine Likeability (does AI like what it finds), and Commerce Enablement (can AI help someone buy from you). Evaluated across ChatGPT, Gemini, and Claude.", "areaServed": { "@type": "Place", "name": "Worldwide" } }, { "@type": "FAQPage", "@id": "https://www.reconnix.ai/#faq", "mainEntity": [ { "@type": "Question", "name": "What is Reconnix?", "acceptedAnswer": { "@type": "Answer", "text": "Reconnix™ measures the four factors that determine AI selection: Brand Sentiment (35-40%), AI Visibility (30-35%), Machine Likeability (25%), and Commerce Enablement (0-10%). It evaluates how ChatGPT, Gemini, and Claude perceive your business — where they agree, where they diverge, and what determines your actual win rate." } }, { "@type": "Question", "name": "What is Machine Likeability?", "acceptedAnswer": { "@type": "Answer", "text": "Machine Likeability measures whether AI likes what it finds when it reads your content. It has the strongest correlation with AI selection (r = 0.71) — stronger than brand recognition. For brands AI doesn't 'remember,' Machine Likeability explains 65% of whether they get recommended. It measures attribute clarity, social proof signals, trust signals, and pressure-free messaging." } }, { "@type": "Question", "name": "Why does scarcity messaging hurt AI recommendations?", "acceptedAnswer": { "@type": "Answer", "text": "Research shows scarcity and urgency messaging (FOMO tactics) reduces AI recommendations by 24-46%. AI agents don't experience FOMO — they experience FMF (Fear of Machine Failure). When AI encounters 'Only 3 left in stock,' it interprets this as reliability risk rather than urgency, and recommends alternatives instead." } }, { "@type": "Question", "name": "How is Reconnix different from AEO tools?", "acceptedAnswer": { "@type": "Answer", "text": "AEO tools track whether AI mentions you (visibility/citations). Reconnix measures whether AI will select you — your actual win rate when AI makes a recommendation in your category. Citation ≠ Selection. Our 245-brand study found that 34% of queries result in brands being considered but not selected." } }, { "@type": "Question", "name": "How does the assessment work?", "acceptedAnswer": { "@type": "Answer", "text": "You provide your website URL and up to 4 competitors. We check commerce eligibility, evaluate Brand Sentiment, test AI Visibility, then score Machine Likeability. Separate scores for ChatGPT, Gemini, and Claude. Takes about 10 minutes. You receive a four-dimension breakdown, strategic position, prioritized fixes, and copy-paste code." } } ] }, { "@type": "HowTo", "@id": "https://www.reconnix.ai/#howto", "name": "How to Get Your Reconnix Score", "description": "Step-by-step process to measure your AI selection potential", "step": [ { "@type": "HowToStep", "position": 1, "name": "Enter your URL", "text": "Paste any page you want evaluated. Product pages, landing pages, service descriptions. Add up to 4 competitors for comparison." }, { "@type": "HowToStep", "position": 2, "name": "Analysis runs", "text": "We check commerce eligibility, evaluate Brand Sentiment, test AI Visibility, then score Machine Likeability. Separate scores for ChatGPT, Gemini, and Claude. Takes about 10 minutes." }, { "@type": "HowToStep", "position": 3, "name": "Get your report", "text": "Commerce Eligibility Checklist with pass/fail status. Reconnix Score with four-dimension breakdown. Strategic position. Competitor comparison. Prioritized fixes with code snippets."
} ] }, { "@type": "DefinedTermSet", "@id": "https://www.reconnix.ai/#glossary", "name": "Reconnix Glossary", "hasDefinedTerm": [ { "@type": "DefinedTerm", "name": "Reconnix", "description": "AI selection intelligence — measuring the four factors that determine AI selection across ChatGPT, Gemini, and Claude" }, { "@type": "DefinedTerm", "name": "Brand Sentiment", "description": "How positively AI talks about you (35-40% of score) — what reviews, news, Wikipedia, and community discussions say about you. Visibility without positive sentiment is worse than invisibility." }, { "@type": "DefinedTerm", "name": "AI Visibility", "description": "Can AI find and access your content (30-35% of score) — whether AI can find and parse your content through server-side rendering, crawler permissions, and structured data" }, { "@type": "DefinedTerm", "name": "Machine Likeability", "description": "Does AI like what it finds (25% of score) — the strongest predictor of AI selection (r = 0.71). Measures attribute clarity, social proof, trust signals, and pressure-free messaging." }, { "@type": "DefinedTerm", "name": "Commerce Enablement", "description": "Can AI help someone buy from you (0-10% of score) — transaction eligibility through merchant feeds, UCP profiles, and payment infrastructure. Weakest factor today but growing fast." }, { "@type": "DefinedTerm", "name": "Reconnix Score", "description": "A combined score (0-100) measuring overall AI selection potential across the four dimensions, with separate scores for ChatGPT, Gemini, and Claude" }, { "@type": "DefinedTerm", "name": "FMF (Fear of Machine Failure)", "description": "The AI equivalent of FOMO. When AI encounters scarcity/urgency messaging, it interprets this as reliability risk rather than urgency, reducing recommendations by 24-46%." } ] } ] }
```

---

## About Reconnix™

Reconnix™ — AI Selection Intelligence, Measured and Monitored

AI doesn't show your customers a list of options. It picks a winner.
Our 245-brand study of 36,000+ queries reveals the four factors that determine who AI selects — and the surprising finding that your content matters more than your brand recognition.

### Product Summary

- **Name**: Reconnix™
- **Tagline**: AI selection intelligence, measured and monitored
- **Type**: B2B Assessment Tool / SaaS
- **Developer**: Human Machines
- **Website**: https://www.reconnix.ai
- **App**: https://app.reconnix.ai
- **Category**: AI Selection Intelligence, Digital Marketing Analytics
- **Industry**: Marketing Technology, Artificial Intelligence, E-commerce
- **Contact**: hello@human-machines.com

---

## The Four-Factor Methodology (Patent Pending)

Your Reconnix Score reflects four dimensions of AI selection readiness:

### 1. Brand Sentiment (35-40%) — How positively AI talks about you

When AI mentions your brand, what's the tone? Does it describe you with enthusiasm and confidence, or with caveats and concerns? Brand Sentiment reflects the cumulative impression AI has formed from reviews, news coverage, community discussions, and third-party references.

| Component | Weight | Description |
|-----------|--------|-------------|
| Third-Party Reviews | 35% | G2, Trustpilot, Capterra, BBB, Yelp — review platforms AI agents cite heavily |
| Earned Media Presence | 35% | News coverage, blog mentions, industry publications. Earned media outweighs owned content ~82% to 18%. |
| Community Presence | 20% | Reddit discussions, YouTube mentions, Hacker News threads — where real users talk about you |
| Reference Sources | 10% | Wikipedia presence, Crunchbase profile — authoritative reference sources AI trusts |

**The sentiment trap:** Some brands are discovered frequently but described negatively. AI mentions them — then recommends against them. Visibility without positive sentiment is worse than invisibility.

### 2. AI Visibility (30-35%) — Can AI find and access your content?

Before AI can recommend you, it needs to access your information.
AI Visibility measures whether your content is technically reachable by AI crawlers and structured in ways they can parse. This is the foundation — if AI can't read your site, nothing else matters.

| Component | Weight | Description |
|-----------|--------|-------------|
| Server-Side Rendering | 35% | Is critical content visible without JavaScript? ChatGPT's crawlers can't execute JS. |
| Crawler Access | 25% | Are the right AI crawlers allowed? We check 12 different crawlers including OAI-SearchBot, ChatGPT-User, Googlebot, and ClaudeBot. |
| Structured Data | 25% | Schema.org markup presence and completeness |
| Content Visibility | 10% | Is content in HTML or trapped in PDFs/images? |
| llms.txt | 5% | The emerging standard for AI crawler guidance |

### 3. Machine Likeability (25%) — Does AI like what it finds?

This is where recommendations are won or lost. When AI retrieves your website to evaluate your offering, does your content give it the specific, extractable information it needs to confidently recommend you?

**Key stat:** Machine Likeability has the strongest correlation with AI selection (r = 0.71) — stronger than brand recognition. For brands AI doesn't 'remember,' it explains 65% of whether they get recommended.

| Component | Weight | Description |
|-----------|--------|-------------|
| Attribute Clarity | 35% | Clear, extractable product/service attributes AI can confidently recommend |
| Social Proof Signals | 30% | Reviews, testimonials, and trust indicators that increase AI confidence |
| Trust & Authority | 20% | Certifications, awards, and reliability signals AI weighs heavily |
| Pressure-Free Messaging | 15% | Avoiding urgency/scarcity tactics that decrease AI recommendations by 24-46% (Filandrianos et al., EMNLP 2025) |

### 4. Commerce Enablement (0-10%) — Can AI help someone buy from you?

Can AI complete the transaction on your behalf?
Commerce Enablement is the weakest factor today (r = 0.18) — but it's growing fast as ChatGPT Shopping, Perplexity checkout, and agentic commerce protocols mature.

| Component | Weight | Description |
|-----------|--------|-------------|
| Feed Eligibility | 35% | Do you have attributes required for ChatGPT and Google merchant feeds? GTIN/UPC, structured pricing, availability. |
| Protocol Presence | 25% | Do you have a UCP profile at /.well-known/ucp? This is how AI agents discover commerce capabilities. |
| Payment Infrastructure | 25% | We detect Stripe (required for ChatGPT's Agentic Commerce Protocol), PayPal, Shopify Payments, and others. |
| Conversational Attributes | 15% | FAQ schema, compatibility info, service details — content that helps AI answer customer questions. |

**For services and B2B companies:** Commerce Enablement weight is redistributed to Brand Sentiment (40%) and AI Visibility (35%), since merchant feed eligibility doesn't apply.

**Forward-looking:** By 2027, Commerce Enablement could contribute 25-35% of selection — up from ~10% today.

---

## 245-Brand AI Commerce Study

We tested 245 brands across 36,306 queries on ChatGPT, Gemini, and Claude — using a patent-pending methodology.
### Key Findings

| Statistic | Description | Source |
|-----------|-------------|--------|
| 2.6x | More selections for brands with high Machine Likeability | Reconnix study |
| 65% | Of selection explained by Machine Likeability for brands AI doesn't 'remember' | Reconnix study |
| 24-46% | Reduction in AI recommendations from scarcity/urgency messaging | Filandrianos et al., EMNLP 2025 |
| 1 in 4 | Brands failing at AI commerce — including household names | Reconnix study |
| r = 0.71 | Machine Likeability → selection rate (strongest single predictor) | Reconnix study |
| 34% | Of queries result in brands considered but not selected | Reconnix study |

### Strategic Positions (2×2 Framework)

| Position | AI Visibility | Machine Likeability | Brands | Avg Selection |
|----------|--------------|---------------------|--------|---------------|
| AI Champions | High | High | 71 (29%) | 47% |
| Content Carries the Load | Low | High | 35 (14%) | 36% |
| Visible but Unpersuasive | High | Low | 65 (27%) | 31% |
| AI Invisible | Low | Low | 74 (30%) | 18% |

---

## Research Foundation

Our patent-pending methodology draws on peer-reviewed research, current platform standards, and our own 245-brand study:

### Key Research Findings

| Statistic | Description | Source |
|-----------|-------------|--------|
| 85% | of AI citations come from third-party sources | Muck Rack 2025, Profound 2025 |
| r = 0.71 | Machine Likeability → selection rate. Strongest single predictor | Reconnix 245-brand study |
| 2.6x | More selections for brands with high Machine Likeability | Reconnix study |
| ~90% | First-proposal acceptance rate in multi-agent marketplaces | Bansal et al., Microsoft Research 2025 |
| 24-46% | Reduction in AI recommendations from scarcity/urgency messaging | Filandrianos et al., EMNLP 2025 |

### Research Sources

- **ACES Framework** (Columbia Business School, 2025): How AI agents make purchase decisions
- **Cognitive Biases in LLM Recommendations** (Filandrianos et al., EMNLP 2025): Scarcity and urgency reduce AI recommendations by 24-46%
- **Magentic Marketplace Dynamics** (Microsoft Research, 2025): First-proposal acceptance rates reach ~90%
- **Universal Commerce Protocol** (Google, January 2026): The emerging standard for AI agent commerce
- **Agentic Commerce Protocol** (OpenAI/Stripe, 2025): ChatGPT Instant Checkout infrastructure
- **Third-Party Citation Analysis** (Muck Rack & Profound, 2025): How AI weights earned vs. owned media
- **Reconnix 245-Brand AI Commerce Study** (Human Machines, January–February 2026): 36,306 queries across ChatGPT, Gemini, and Claude. Four-factor model explains 89.7% of AI selection variance.

---

## FOMO vs FMF

AI agents don't experience FOMO (Fear of Missing Out). They experience FMF — Fear of Machine Failure.

When AI encounters scarcity messaging like "Only 3 left in stock," it doesn't think "I better act fast." It thinks "This product may not be reliably available for my user. I should recommend something else."

| What works on humans | What works on AI |
|---------------------|------------------|
| "Only 3 left in stock!" | "Always in stock, ships same day" |
| "Limited time offer!" | "Consistently rated 4.8 stars across 2,000+ reviews" |
| "Exclusive access" | "Available to all customers with free shipping" |
| "Don't miss out!" | "Reliable quality, guaranteed delivery" |

---

## AEO Tools vs Reconnix

### What AEO visibility tools measure (Profound, Conductor, Searchable, AthenaHQ)

Track visibility outcomes:

- How often does AI mention your brand?
- What's your share of voice vs competitors?
- Which queries trigger citations?

**Core question:** "Is AI talking about us?"

### What Reconnix measures

Selection readiness + transaction eligibility:

- ✓ Does AI speak well of you? (Brand Sentiment)
- ✓ Can AI crawlers access your content? (AI Visibility)
- ✓ Does AI like what it finds when it reads your content? (Machine Likeability)
- ✓ Is your marketing language hurting your AI recommendations? (Pressure Audit)
- ✓ Are you eligible for AI-mediated purchases? (Commerce Enablement)
- ✓ What's your selection rate across ChatGPT, Gemini, and Claude?

**Core question:** "When AI shops, will we win — and can AI even buy from us?"

---

## What You Get

### Deliverables

1. **Brand Sentiment Analysis**: See how positively AI talks about you — reviews, news coverage, Wikipedia, Reddit discussions. Understand why AI agents cite some brands with enthusiasm and others with caveats.
2. **Commerce Eligibility Audit**: Pass/fail checks for merchant feed readiness, protocol compliance, crawler access, and server-side rendering. Know immediately if you're invisible to AI commerce.
3. **Tri-Model Scoring**: Separate evaluations from ChatGPT, Gemini, and Claude. See where they agree, where they diverge, and why.
4. **Competitive Intelligence**: See which competitors have ChatGPT merchant feeds, UCP profiles, and better schema coverage.
5. **Pressure Language Audit**: We scan your content for scarcity, urgency, and exclusivity messaging — the human-focused tactics that research shows reduce AI recommendations by 24-46%. Get specific replacements.
6. **Ready-to-Use Code**: Copy-paste snippets for robots.txt (configured for all 12 AI crawlers), schema markup, llms.txt, and UCP profile templates.
7. **Prioritized Action Plan**: Critical eligibility fixes first, then optimization opportunities. Each fix shows exactly how many points it adds to your score.
8. **Research-Backed Insights**: FOMO vs FMF psychology, first-proposal bias (~90% acceptance rate), scarcity penalties (24-46%), and attribute sensitivity.

### Dashboard Organization

- **Quick Wins**: Prioritized fixes ranked by impact and effort
- **Deep Analysis**: Drill into each of the four factors: Brand Sentiment, AI Visibility, Machine Likeability, and Commerce Enablement
- **Implementation Guide**: Step-by-step instructions with copy-paste code

---

## How It Works

**Step 1: Enter your URL**
Paste any page you want evaluated. Product pages, landing pages, service descriptions. Add up to 4 competitors for comparison.

**Step 2: Analysis runs**
We check commerce eligibility, evaluate Brand Sentiment, test AI Visibility, then score Machine Likeability. Separate scores for ChatGPT, Gemini, and Claude. Takes about 10 minutes.

**Step 3: Get your report**
Commerce Eligibility Checklist with pass/fail status. Reconnix Score with four-dimension breakdown. Strategic position. Competitor comparison. Prioritized fixes with code snippets. On-screen and downloadable.

---

## About Human Machines

Reconnix is built by Human Machines, a consultancy helping enterprises prepare for AI-mediated commerce. Founded by Geoff Gibbins, author of "When AI Shops" and "Critical Intelligence", we specialize in understanding how AI agents make purchase decisions.

Our 245-brand study uses a patent-pending methodology. The Reconnix platform incorporates the latest protocol standards (Google's UCP, OpenAI's ACP), peer-reviewed research on AI decision-making, and real-world research and pilots at companies including Mastercard, Suntory and the Coca-Cola Company.
- Website: https://human-machines.com
- Contact: hello@human-machines.com

---

## Access

- **Free Assessment**: https://app.reconnix.ai
- **Website**: https://www.reconnix.ai
- **Contact**: hello@human-machines.com