Examples - Company Search API

We've provided code samples in Python, cURL, Ruby, Go, and JavaScript. If you aren't comfortable working in any of these languages, feel free to use this handy tool to convert code from cURL to the language of your choice.

❗️

Heads Up! Credit Usage

Company Search API calls cost one credit for every search result returned.

If you are making a search that could have a large number of results, use the size parameter to set the maximum number of results returned and cap your credit usage.
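For example, if a query matches thousands of companies, setting size to 10 caps the response, and the charge, at 10 records. A minimal sketch using the Python SDK (the automotive query here is just an illustration):

# See https://github.com/peopledatalabs/peopledatalabs-python
from peopledatalabs import PDLPY

# Create a client, specifying your API key
CLIENT = PDLPY(api_key="YOUR API KEY")

# Even if many records match this query, at most `size` records are
# returned (and charged for), so this call costs at most 10 credits.
response = CLIENT.company.search(
    sql="SELECT * FROM company WHERE industry='automotive';",
    size=10
).json()

if response["status"] == 200:
    print(f"Returned (and charged for) {len(response['data'])} of "
          f"{response['total']} total matching records.")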

💡

We want your feedback!

Do you see a bug? Is there an example you'd like to see that's not listed here?

Head over to the public roadmap to submit a bug ticket or feature request, and you'll receive automatic notifications as your bug is resolved or your request is implemented.

Basic Usage

"I want to make a query and save the results to a file."

import json

# See https://github.com/peopledatalabs/peopledatalabs-python
from peopledatalabs import PDLPY

# Create a client, specifying your API key
CLIENT = PDLPY(
    api_key="YOUR API KEY",
)

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"term": {"website": "google.com"}}
      ]
    }
  }
}

# Create a parameters JSON object
PARAMS = {
  'query': ES_QUERY,
  'size': 10,
  'pretty': True
}

# Pass the parameters object to the Company Search API
response = CLIENT.company.search(**PARAMS).json()

# Check for successful response
if response["status"] == 200:
  data = response['data']
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
  print("Error:", response)
import json

# See https://github.com/peopledatalabs/peopledatalabs-python
from peopledatalabs import PDLPY

# Create a client, specifying your API key
CLIENT = PDLPY(
    api_key="YOUR API KEY",
)

# Create an SQL query
SQL_QUERY = \
"""
  SELECT * FROM company
  WHERE website='google.com';
 """

# Create a parameters JSON object
PARAMS = {
  'sql': SQL_QUERY,
  'size': 10,
  'pretty': True
}

# Pass the parameters object to the Company Search API
response = CLIENT.company.search(**PARAMS).json()

# Check for successful response
if response["status"] == 200:
  data = response['data']
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
  print("Error:", response)
# Elasticsearch
curl -X GET 'https://api.peopledatalabs.com/v5/company/search' \
-H 'X-Api-Key: xxxx' \
-H 'Content-Type: application/json' \
--data-raw '{
  "size": 10,
  "query": {
    "bool": {
      "must": [
        {"term": {"website": "google.com"}}
      ]
    }
  }
}'

# SQL
curl -X GET \
  'https://api.peopledatalabs.com/v5/company/search' \
  -H 'X-Api-Key: xxxx' \
  -H 'Content-Type: application/json' \
  --data-raw '{
    "size": 10,
    "sql": "SELECT * FROM company WHERE website='\''google.com'\'';"
}'
// See https://github.com/peopledatalabs/peopledatalabs-js
import PDLJS from 'peopledatalabs';

import fs from 'fs';

// Create a client, specifying your API key
const PDLJSClient = new PDLJS({ apiKey: "YOUR API KEY" });

// Create an Elasticsearch query
const esQuery = {
  query: {
    bool: {
      must:[
        {"term": {"website": "google.com"}} 
      ]
    }
  }
}

// Create a parameters JSON object
const params = {
  searchQuery: esQuery, 
  size: 10,
  pretty: true
}

// Pass the parameters object to the Company Search API
PDLJSClient.company.search.elastic(params).then((data) => {
    // Write out each profile found to file, one JSON record per line
    fs.writeFile("my_pdl_search.jsonl", data.data.map((record) => JSON.stringify(record)).join("\n"), (err) => {
        if (err) throw err;
    });
    console.log(`Successfully grabbed ${data.data.length} records from PDL.`);
    console.log(`${data["total"]} total PDL records exist matching this query.`)
}).catch((error) => {
    console.log("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
    console.log(error);
});
// See https://github.com/peopledatalabs/peopledatalabs-js
import PDLJS from 'peopledatalabs';

import fs from 'fs';

// Create a client, specifying your API key
const PDLJSClient = new PDLJS({ apiKey: "YOUR API KEY" });

// Create an SQL query
const sqlQuery = `SELECT * FROM company
                    WHERE website='google.com';`;

// Create a parameters JSON object
const params = {
    searchQuery: sqlQuery,
    size: 10,
    pretty: true
}

// Pass the parameters object to the Company Search API
PDLJSClient.company.search.sql(params).then((data) => {
    // Write out each profile found to file, one JSON record per line
    fs.writeFile("my_pdl_search.jsonl", data.data.map((record) => JSON.stringify(record)).join("\n"), (err) => {
        if (err) throw err;
    });
    console.log(`Successfully grabbed ${data.data.length} records from PDL.`);
    console.log(`${data["total"]} total PDL records exist matching this query.`)
}).catch((error) => {
    console.log("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
    console.log(error);
});
require 'json'

# See https://github.com/peopledatalabs/peopledatalabs-ruby
require 'peopledatalabs'

# Set your API key
Peopledatalabs.api_key = 'YOUR API KEY'

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"term": {"website": "google.com"}},
      ]
    }
  }
}

# Pass parameters to the Company Search API
response = Peopledatalabs::Search.company(searchType: 'elastic', query: ES_QUERY, size: 10, pretty: true)

# Check for successful response
if response['status'] == 200
    data = response['data']
    # Write out each profile found to file
    File.open("my_pdl_search.jsonl", "w") do |out|
        data.each { |record| out.write(JSON.dump(record) + "\n") }
    end
    puts "Successfully grabbed #{data.length()} records from PDL."
    puts "#{response['total']} total PDL records exist matching this query."
else
    puts "NOTE: The carrier pigeons lost motivation in flight. See error and try again."
    puts "Error: #{response}"
end
require 'json'

# See https://github.com/peopledatalabs/peopledatalabs-ruby
require 'peopledatalabs'

# Set your API key
Peopledatalabs.api_key = 'YOUR API KEY'

# Create an SQL query
SQL_QUERY = <<~SQL
  SELECT * FROM company
  WHERE website='google.com';
SQL

# Pass parameters to the Company Search API
response = Peopledatalabs::Search.company(searchType: 'sql', query: SQL_QUERY, size: 10, pretty: true)

# Check for successful response
if response['status'] == 200
    data = response['data']
    # Write out each profile found to file
    File.open("my_pdl_search.jsonl", "w") do |out|
        data.each { |record| out.write(JSON.dump(record) + "\n") }
    end
    puts "Successfully grabbed #{data.length()} records from PDL."
    puts "#{response['total']} total PDL records exist matching this query."
else
    puts "NOTE: The carrier pigeons lost motivation in flight. See error and try again."
    puts "Error: #{response}"
end
package main

import (
    "fmt"
    "os"
    "encoding/json"
    "context"
)

// See https://github.com/peopledatalabs/peopledatalabs-go
import (
    pdl "github.com/peopledatalabs/peopledatalabs-go"
    pdlmodel "github.com/peopledatalabs/peopledatalabs-go/model"
)

func main() {
    // Set your API key
    apiKey := "YOUR API KEY"
    // Set API key as an environment variable
    // apiKey := os.Getenv("API_KEY")

    // Create a client, specifying your API key
    client := pdl.New(apiKey)

    // Create an Elasticsearch query
    elasticSearchQuery := map[string]interface{} {
        "query": map[string]interface{} {
            "bool": map[string]interface{} {
                "must": []map[string]interface{} {
                    {"term": map[string]interface{}{"website": "google.com"}},
                },
            },
        },
    }

    // Create a parameters JSON object
    params := pdlmodel.SearchParams {
        BaseParams: pdlmodel.BaseParams {
            Size: 10,
            Pretty: true,
        },
        SearchBaseParams: pdlmodel.SearchBaseParams {
            Query: elasticSearchQuery,
        },
    }
    
    // Pass the parameters object to the Company Search API
    response, err := client.Company.Search(context.Background(), params)
    // Check for successful response
    if err == nil {
        data := response.Data
        // Create file
        out, outErr := os.Create("my_pdl_search.jsonl")
        // Only close the file if it was created successfully
        if outErr == nil {
            defer out.Close()
            for i := range data {
                // Convert each profile found to JSON
                record, jsonErr := json.Marshal(data[i])
                // Write out each profile to file
                if jsonErr == nil {
                    out.WriteString(string(record) + "\n")
                }
            }
            out.Sync()
        }
        fmt.Printf("Successfully grabbed %d records from PDL.\n", len(data))
        fmt.Printf("%d total PDL records exist matching this query.\n", response.Total)
    } else {
        fmt.Println("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
        fmt.Println("Error:", err)
    } 
}
package main

import (
    "fmt"
    "os"
    "encoding/json"
    "context"
)

// See https://github.com/peopledatalabs/peopledatalabs-go
import (
    pdl "github.com/peopledatalabs/peopledatalabs-go"
    pdlmodel "github.com/peopledatalabs/peopledatalabs-go/model"
)

func main() {
    // Set your API key
    apiKey := "YOUR API KEY"
    // Set API key as an environment variable
    // apiKey := os.Getenv("API_KEY")

    // Create a client, specifying your API key
    client := pdl.New(apiKey)

    // Create an SQL query
    sqlQuery := "SELECT * FROM company" +
        " WHERE website='google.com';"

    // Create a parameters JSON object
    params := pdlmodel.SearchParams {
        BaseParams: pdlmodel.BaseParams {
            Size: 10,
            Pretty: true,
        },
        SearchBaseParams: pdlmodel.SearchBaseParams {
            SQL: sqlQuery,
        },
    }
    
    // Pass the parameters object to the Company Search API
    response, err := client.Company.Search(context.Background(), params)
    // Check for successful response
    if err == nil {
        data := response.Data
        // Create file
        out, outErr := os.Create("my_pdl_search.jsonl")
        // Only close the file if it was created successfully
        if outErr == nil {
            defer out.Close()
            for i := range data {
                // Convert each profile found to JSON
                record, jsonErr := json.Marshal(data[i])
                // Write out each profile to file
                if jsonErr == nil {
                    out.WriteString(string(record) + "\n")
                }
            }
            out.Sync()
        }
        fmt.Printf("Successfully grabbed %d records from PDL.\n", len(data))
        fmt.Printf("%d total PDL records exist matching this query.\n", response.Total)
    } else {
        fmt.Println("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
        fmt.Println("Error:", err)
    } 
}
import requests, json

# Set your API key
API_KEY = "YOUR API KEY"

# Set the Company Search API URL
PDL_URL = "https://api.peopledatalabs.com/v5/company/search"

# Set headers
HEADERS = {
  'Content-Type': "application/json",
  'X-api-key': API_KEY
}

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"term": {"website": "google.com"}}
      ]
    }
  }
}

# Create a parameters JSON object
PARAMS = {
  'query': json.dumps(ES_QUERY),
  'size': 10,
  'pretty': True
}

# Pass the parameters object to the Company Search API
response = requests.get(
  PDL_URL,
  headers=HEADERS,
  params=PARAMS
).json()

# Check for successful response
if response["status"] == 200:
  data = response['data']
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
  print("Error:", response)
import requests, json

# Set your API key
API_KEY = "YOUR API KEY"

# Set the Company Search API URL
PDL_URL = "https://api.peopledatalabs.com/v5/company/search"

# Set headers
HEADERS = {
  'Content-Type': "application/json",
  'X-api-key': API_KEY
}

# Create an SQL query
SQL_QUERY = \
"""
  SELECT * FROM company
  WHERE website='google.com';
 """

# Create a parameters JSON object
PARAMS = {
  'sql': SQL_QUERY,
  'size': 10,
  'pretty': True
}

# Pass the parameters object to the Company Search API
response = requests.get(
  PDL_URL,
  headers=HEADERS,
  params=PARAMS
).json()

# Check for successful response
if response["status"] == 200:
  data = response['data']
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
  print("error:", response)

Using POST Requests

"I would like to use POST requests to query instead of GET requests so that I can make queries with a lot of parameters."

📘

Difference Between GET and POST Requests

See this article for a comparison of the differences between GET and POST requests. The biggest difference is that POST requests place no practical limit on the amount of data that you can pass in a single request.
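In these examples the switch is small: with the Python requests library, a GET request carries the query as URL parameters (so the Elasticsearch query must first be serialized with json.dumps()), while a POST request carries it in the JSON body as a plain dictionary. A minimal side-by-side sketch (same URL and headers as the full examples below):

import requests, json

PDL_URL = "https://api.peopledatalabs.com/v5/company/search"
HEADERS = {
  'Content-Type': "application/json",
  'X-api-key': "YOUR API KEY"
}
ES_QUERY = {"query": {"bool": {"must": [{"term": {"website": "google.com"}}]}}}

# GET: parameters travel in the URL, so the query must be a JSON string
get_response = requests.get(
  PDL_URL,
  headers=HEADERS,
  params={'query': json.dumps(ES_QUERY), 'size': 10}
).json()

# POST: parameters travel in the request body, so the query stays a dict
post_response = requests.post(
  PDL_URL,
  headers=HEADERS,
  json={'query': ES_QUERY, 'size': 10}
).json()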

import requests, json

# Set your API key
API_KEY = "YOUR API KEY"

# Set the Company Search API URL
PDL_URL = "https://api.peopledatalabs.com/v5/company/search"

# Set headers
HEADERS = {
  'Content-Type': "application/json",
  'X-api-key': API_KEY
}

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"term": {"website": "google.com"}},
      ]
    }
  }
}

# Create a parameters JSON object
PARAMS = {
  'query': ES_QUERY, # This is a different syntax than when using GET requests
  'size': 10,
  'pretty': True
}

# Pass the parameters object to the Company Search API using POST method
response = requests.post( # Using POST method
  PDL_URL,
  headers=HEADERS,
  json=PARAMS # Pass the data directly as a JSON object
  # data=json.dumps(PARAMS) # This is an alternative way of passing data using a string
).json()

# Check for successful response
if response["status"] == 200:
  data = response['data']
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
  print("Error:", response)
import requests, json

# Set your API key
API_KEY = "YOUR API KEY"

# Set the Company Search API URL
PDL_URL = "https://api.peopledatalabs.com/v5/company/search"

# Set headers
HEADERS = {
  'Content-Type': "application/json",
  'X-api-key': API_KEY
}

# Create an SQL query
SQL_QUERY = \
"""
  SELECT * FROM company
  WHERE website='google.com';
 """

# Create a parameters JSON object
PARAMS = {
  'sql': SQL_QUERY,
  'size': 10,
  'pretty': True
}

# Pass the parameters object to the Company Search API using POST method
response = requests.post( # Using POST method
  PDL_URL,
  headers=HEADERS,
  json=PARAMS # Pass the data directly as a JSON object
  # data=json.dumps(PARAMS) # This is an alternative way of passing data using a string
).json()

# Check for successful response
if response["status"] == 200:
  data = response['data']
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
  print("Error:", response)
# Elasticsearch
curl -X POST 'https://api.peopledatalabs.com/v5/company/search' \
  -H 'X-Api-Key: your-api-key' \
  -H 'Content-Type: application/json' \
  -d '{
  "size": 10,
  "query": {
    "bool": {
      "must": [
        {"term": {"website": "google.com"}}
      ]
    }
  }
}'

# SQL
curl -X POST \
  'https://api.peopledatalabs.com/v5/company/search' \
  -H 'X-Api-Key: your-api-key' \
  -H 'Content-Type: application/json' \
  -d '{
    "size": 10,
    "sql": "SELECT * FROM company WHERE website='\''google.com'\''"
}'

Company Search by Tags

"I want to find US-based companies tagged as 'big data' in the financial services industry."

import json

# See https://github.com/peopledatalabs/peopledatalabs-python
from peopledatalabs import PDLPY

# Create a client, specifying your API key
CLIENT = PDLPY(
    api_key="YOUR API KEY",
)

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"term": {"tags": "big data"}},
        {"term": {"industry": "financial services"}},
        {"term": {"location.country": "united states"}}
      ]
    }
  }
}

# Create a parameters JSON object
PARAMS = {
  'query': ES_QUERY,
  'size': 10,
  'pretty': True
}

# Pass the parameters object to the Company Search API
response = CLIENT.company.search(**PARAMS).json()

# Check for successful response
if response["status"] == 200:
  data = response['data']
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
  print("Error:", response)
import json

# See https://github.com/peopledatalabs/peopledatalabs-python
from peopledatalabs import PDLPY

# Create a client, specifying your API key
CLIENT = PDLPY(
    api_key="YOUR API KEY",
)

# Create an SQL query
SQL_QUERY = \
"""
  SELECT * FROM company
  WHERE tags='big data'
  AND industry='financial services'
  AND location.country='united states';
 """

# Create a parameters JSON object
PARAMS = {
  'sql': SQL_QUERY,
  'size': 10,
  'pretty': True
}

# Pass the parameters object to the Company Search API
response = CLIENT.company.search(**PARAMS).json()

# Check for successful response
if response["status"] == 200:
  data = response['data']
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
  print("Error:", response)
// See https://github.com/peopledatalabs/peopledatalabs-js
import PDLJS from 'peopledatalabs';

import fs from 'fs';

// Create a client, specifying your API key
const PDLJSClient = new PDLJS({ apiKey: "YOUR API KEY" });

// Create an Elasticsearch query
const esQuery = {
  query: {
    bool: {
      must:[
        {"term": {"tags": "big data"}},
        {"term": {"industry": "financial services"}},
        {"term": {"location.country": "united states"}}
      ]
    }
  }
}

// Create a parameters JSON object
const params = {
  searchQuery: esQuery, 
  size: 10,
  pretty: true
}

// Pass the parameters object to the Company Search API
PDLJSClient.company.search.elastic(params).then((data) => {
    // Write out each profile found to file, one JSON record per line
    fs.writeFile("my_pdl_search.jsonl", data.data.map((record) => JSON.stringify(record)).join("\n"), (err) => {
        if (err) throw err;
    });
    console.log(`Successfully grabbed ${data.data.length} records from PDL.`);
    console.log(`${data["total"]} total PDL records exist matching this query.`)
}).catch((error) => {
    console.log("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
    console.log(error);
});
// See https://github.com/peopledatalabs/peopledatalabs-js
import PDLJS from 'peopledatalabs';

import fs from 'fs';

// Create a client, specifying your API key
const PDLJSClient = new PDLJS({ apiKey: "YOUR API KEY" });

// Create an SQL query
const sqlQuery = `SELECT * FROM company
                    WHERE tags='big data'
                    AND industry='financial services'
                    AND location.country='united states';`;

// Create a parameters JSON object
const params = {
    searchQuery: sqlQuery,
    size: 10,
    pretty: true
}

// Pass the parameters object to the Company Search API
PDLJSClient.company.search.sql(params).then((data) => {
    // Write out each profile found to file, one JSON record per line
    fs.writeFile("my_pdl_search.jsonl", data.data.map((record) => JSON.stringify(record)).join("\n"), (err) => {
        if (err) throw err;
    });
    console.log(`Successfully grabbed ${data.data.length} records from PDL.`);
    console.log(`${data["total"]} total PDL records exist matching this query.`)
}).catch((error) => {
    console.log("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
    console.log(error);
});
require 'json'

# See https://github.com/peopledatalabs/peopledatalabs-ruby
require 'peopledatalabs'

# Set your API key
Peopledatalabs.api_key = 'YOUR API KEY'

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"term": {"tags": "big data"}},
        {"term": {"industry": "financial services"}},
        {"term": {"location.country": "united states"}}
      ]
    }
  }
}

# Pass parameters to the Company Search API
response = Peopledatalabs::Search.company(searchType: 'elastic', query: ES_QUERY, size: 10, pretty: true)

# Check for successful response
if response['status'] == 200
    data = response['data']
    # Write out each profile found to file
    File.open("my_pdl_search.jsonl", "w") do |out|
        data.each { |record| out.write(JSON.dump(record) + "\n") }
    end
    puts "Successfully grabbed #{data.length()} records from PDL."
    puts "#{response['total']} total PDL records exist matching this query."
else
    puts "NOTE: The carrier pigeons lost motivation in flight. See error and try again."
    puts "Error: #{response}"
end
require 'json'

# See https://github.com/peopledatalabs/peopledatalabs-ruby
require 'peopledatalabs'

# Set your API key
Peopledatalabs.api_key = 'YOUR API KEY'

# Create an SQL query
SQL_QUERY = <<~SQL
  SELECT * FROM company
  WHERE tags='big data'
  AND industry='financial services'
  AND location.country='united states';
SQL

# Pass parameters to the Company Search API
response = Peopledatalabs::Search.company(searchType: 'sql', query: SQL_QUERY, size: 10, pretty: true)

# Check for successful response
if response['status'] == 200
    data = response['data']
    # Write out each profile found to file
    File.open("my_pdl_search.jsonl", "w") do |out|
        data.each { |record| out.write(JSON.dump(record) + "\n") }
    end
    puts "Successfully grabbed #{data.length()} records from PDL."
    puts "#{response['total']} total PDL records exist matching this query."
else
    puts "NOTE: The carrier pigeons lost motivation in flight. See error and try again."
    puts "Error: #{response}"
end
package main

import (
    "fmt"
    "os"
    "encoding/json"
    "context"
)

// See https://github.com/peopledatalabs/peopledatalabs-go
import (
    pdl "github.com/peopledatalabs/peopledatalabs-go"
    pdlmodel "github.com/peopledatalabs/peopledatalabs-go/model"
)

func main() {
    // Set your API key
    apiKey := "YOUR API KEY"
    // Set API key as an environment variable
    // apiKey := os.Getenv("API_KEY")

    // Create a client, specifying your API key
    client := pdl.New(apiKey)

    // Create an Elasticsearch query
    elasticSearchQuery := map[string]interface{} {
        "query": map[string]interface{} {
            "bool": map[string]interface{} {
                "must": []map[string]interface{} {
                    {"term": map[string]interface{}{"tags": "big data"}},
                    {"term": map[string]interface{}{"industry": "financial services"}},
                    {"term": map[string]interface{}{"location.country": "united states"}},
                },
            },
        },
    }

    // Create a parameters JSON object
    params := pdlmodel.SearchParams {
        BaseParams: pdlmodel.BaseParams {
            Size: 10,
            Pretty: true,
        },
        SearchBaseParams: pdlmodel.SearchBaseParams {
            Query: elasticSearchQuery,
        },
    }
    
    // Pass the parameters object to the Company Search API
    response, err := client.Company.Search(context.Background(), params)
    // Check for successful response
    if err == nil {
        data := response.Data
        // Create file
        out, outErr := os.Create("my_pdl_search.jsonl")
        // Only close the file if it was created successfully
        if outErr == nil {
            defer out.Close()
            for i := range data {
                // Convert each profile found to JSON
                record, jsonErr := json.Marshal(data[i])
                // Write out each profile to file
                if jsonErr == nil {
                    out.WriteString(string(record) + "\n")
                }
            }
            out.Sync()
        }
        fmt.Printf("Successfully grabbed %d records from PDL.\n", len(data))
        fmt.Printf("%d total PDL records exist matching this query.\n", response.Total)
    } else {
        fmt.Println("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
        fmt.Println("Error:", err)
    } 
}
package main

import (
    "fmt"
    "os"
    "encoding/json"
    "context"
)

// See https://github.com/peopledatalabs/peopledatalabs-go
import (
    pdl "github.com/peopledatalabs/peopledatalabs-go"
    pdlmodel "github.com/peopledatalabs/peopledatalabs-go/model"
)

func main() {
    // Set your API key
    apiKey := "YOUR API KEY"
    // Set API key as an environment variable
    // apiKey := os.Getenv("API_KEY")

    // Create a client, specifying your API key
    client := pdl.New(apiKey)

    // Create an SQL query
    sqlQuery := "SELECT * FROM company" +
        " WHERE tags='big data'" +
        " AND industry='financial services'" +
        " AND location.country='united states';"

    // Create a parameters JSON object
    params := pdlmodel.SearchParams {
        BaseParams: pdlmodel.BaseParams {
            Size: 10,
            Pretty: true,
        },
        SearchBaseParams: pdlmodel.SearchBaseParams {
            SQL: sqlQuery,
        },
    }
    
    // Pass the parameters object to the Company Search API
    response, err := client.Company.Search(context.Background(), params)
    // Check for successful response
    if err == nil {
        data := response.Data
        // Create file
        out, outErr := os.Create("my_pdl_search.jsonl")
        // Only close the file if it was created successfully
        if outErr == nil {
            defer out.Close()
            for i := range data {
                // Convert each profile found to JSON
                record, jsonErr := json.Marshal(data[i])
                // Write out each profile to file
                if jsonErr == nil {
                    out.WriteString(string(record) + "\n")
                }
            }
            out.Sync()
        }
        fmt.Printf("Successfully grabbed %d records from PDL.\n", len(data))
        fmt.Printf("%d total PDL records exist matching this query.\n", response.Total)
    } else {
        fmt.Println("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
        fmt.Println("Error:", err)
    } 
}
import requests, json

# Set your API key
API_KEY = "YOUR API KEY"

# Set the Company Search API URL
PDL_URL = "https://api.peopledatalabs.com/v5/company/search"

# Set headers
HEADERS = {
  'Content-Type': "application/json",
  'X-api-key': API_KEY
}

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"term": {"tags": "big data"}},
        {"term": {"industry": "financial services"}},
        {"term": {"location.country": "united states"}}
      ]
    }
  }
}

# Create a parameters JSON object
PARAMS = {
  'query': json.dumps(ES_QUERY),
  'size': 10,
  'pretty': True
}

# Pass the parameters object to the Company Search API
response = requests.get(
  PDL_URL,
  headers=HEADERS,
  params=PARAMS
).json()

# Check for successful response
if response["status"] == 200:
  data = response['data']
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
  print("Error:", response)
import requests, json

# Set your API key
API_KEY = "YOUR API KEY"

# Set the Company Search API URL
PDL_URL = "https://api.peopledatalabs.com/v5/company/search"

# Set headers
HEADERS = {
  'Content-Type': "application/json",
  'X-api-key': API_KEY
}

# Create an SQL query
SQL_QUERY = \
"""
  SELECT * FROM company
  WHERE tags='big data'
  AND industry='financial services'
  AND location.country='united states';
 """

# Create a parameters JSON object
PARAMS = {
  'sql': SQL_QUERY,
  'size': 10,
  'pretty': True
}

# Pass the parameters object to the Company Search API
response = requests.get(
  PDL_URL,
  headers=HEADERS,
  params=PARAMS
).json()

# Check for successful response
if response["status"] == 200:
  data = response['data']
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
  print("Error:", response)

Sales and Marketing

"I want to find companies offering account-based marketing services in the United States."

import json

# See https://github.com/peopledatalabs/peopledatalabs-python
from peopledatalabs import PDLPY

# Create a client, specifying your API key
CLIENT = PDLPY(
    api_key="YOUR API KEY",
)

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"match": {"summary": "account based marketing"}},
        {"term": {"location.country" : "united states"}}
      ]
    }
  }
}

# Create a parameters JSON object
PARAMS = {
  'query': ES_QUERY,
  'size': 100
}

# Pass the parameters object to the Company Search API
response = CLIENT.company.search(**PARAMS).json()

# Check for successful response
if response["status"] == 200:
  
  data = response['data']
  
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The eager beaver was not so eager. See error and try again.")
  print("error:", response)
// See https://github.com/peopledatalabs/peopledatalabs-js
import PDLJS from 'peopledatalabs';

import fs from 'fs';

// Create a client, specifying your API key
const PDLJSClient = new PDLJS({ apiKey: "YOUR API KEY" });

// Create an Elasticsearch query
const esQuery = {
  query: {
    bool: {
      must:[
        {"match": {"summary": "account based marketing"}},
        {"term": {"location.country" : "united states"}}
      ]
    }
  }
}

// Create a parameters JSON object
const params = {
  searchQuery: esQuery, 
  size: 100,
}

// Pass the parameters object to the Company Search API
PDLJSClient.company.search.elastic(params).then((data) => {
    // Write out each profile found to file, one JSON record per line
    fs.writeFile("my_pdl_search.jsonl", data.data.map((record) => JSON.stringify(record)).join("\n"), (err) => {
        if (err) throw err;
    });
    console.log(`Successfully grabbed ${data.data.length} records from PDL.`);
    console.log(`${data["total"]} total PDL records exist matching this query.`)
}).catch((error) => {
    console.log("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
    console.log(error);
});
require 'json'

# See https://github.com/peopledatalabs/peopledatalabs-ruby
require 'peopledatalabs'

# Set your API key
Peopledatalabs.api_key = 'YOUR API KEY'

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"match": {"summary": "account based marketing"}},
        {"term": {"location.country": "united states"}}
      ]
    }
  }
}

# Pass parameters to the Company Search API
response = Peopledatalabs::Search.company(searchType: 'elastic', query: ES_QUERY, size: 100, pretty: true)

# Check for successful response
if response['status'] == 200
    data = response['data']
    # Write out each profile found to file
    File.open("my_pdl_search.jsonl", "w") do |out|
        data.each { |record| out.write(JSON.dump(record) + "\n") }
    end
    puts "Successfully grabbed #{data.length()} records from PDL."
    puts "#{response['total']} total PDL records exist matching this query."
else
    puts "NOTE: The carrier pigeons lost motivation in flight. See error and try again."
    puts "Error: #{response}"
end
package main

import (
    "fmt"
    "os"
    "encoding/json"
    "context"
)

// See https://github.com/peopledatalabs/peopledatalabs-go
import (
    pdl "github.com/peopledatalabs/peopledatalabs-go"
    pdlmodel "github.com/peopledatalabs/peopledatalabs-go/model"
)

func main() {
    // Set your API key
    apiKey := "YOUR API KEY"
    // Set API key as an environment variable
    // apiKey := os.Getenv("API_KEY")

    // Create a client, specifying your API key
    client := pdl.New(apiKey)

    // Create an Elasticsearch query
    elasticSearchQuery := map[string]interface{} {
        "query": map[string]interface{} {
            "bool": map[string]interface{} {
                "must": []map[string]interface{} {
                    {"match": map[string]interface{}{"summary": "account based marketing"}},
                    {"term": map[string]interface{}{"location.country": "united states"}},
                },
            },
        },
    }

    // Create a parameters JSON object
    params := pdlmodel.SearchParams {
        BaseParams: pdlmodel.BaseParams {
            Size: 100,
        },
        SearchBaseParams: pdlmodel.SearchBaseParams {
            Query: elasticSearchQuery,
        },
    }
    
    // Pass the parameters object to the Company Search API
    response, err := client.Company.Search(context.Background(), params)
    // Check for successful response
    if err == nil {
        data := response.Data
        // Create file
        out, outErr := os.Create("my_pdl_search.jsonl")
        // Only close the file if it was created successfully
        if outErr == nil {
            defer out.Close()
            for i := range data {
                // Convert each profile found to JSON
                record, jsonErr := json.Marshal(data[i])
                // Write out each profile to file
                if jsonErr == nil {
                    out.WriteString(string(record) + "\n")
                }
            }
            out.Sync()
        }
        fmt.Printf("Successfully grabbed %d records from PDL.\n", len(data))
        fmt.Printf("%d total PDL records exist matching this query.\n", response.Total)
    } else {
        fmt.Println("NOTE: The eager beaver was not so eager. See error and try again.")
        fmt.Println("Error:", err)
    } 
}
import requests, json

# Set your API key
API_KEY = "YOUR API KEY"

# Set the Company Search API URL
PDL_URL = "https://api.peopledatalabs.com/v5/company/search"

# Set headers
HEADERS = {
  'Content-Type': "application/json",
  'X-api-key': API_KEY
}

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"match": {"summary": "account based marketing"}},
        {"term": {"location.country" : "united states"}}
      ]
    }
  }
}

# Create a parameters JSON object
PARAMS = {
  'query': json.dumps(ES_QUERY),
  'size': 100
}

# Pass the parameters object to the Company Search API
response = requests.get(
  PDL_URL,
  headers=HEADERS,
  params=PARAMS
).json()

# Check for successful response
if response["status"] == 200:
  
  data = response['data']
  
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The eager beaver was not so eager. See error and try again.")
  print("error:", response)

Investment Research

"I want to find 100 small biotech companies headquartered in the San Francisco area with under 50 employees ."

import json

# See https://github.com/peopledatalabs/peopledatalabs-python
from peopledatalabs import PDLPY

# Create a client, specifying your API key
CLIENT = PDLPY(
    api_key="YOUR API KEY",
)

# https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/industry.txt
# for enumerated possible values of industry

# https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/job_company_size.txt
# for enumerated possible values of company sizes

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"terms": {"size": ["1-10", "11-50"]}},
        {"term": {"industry" : "biotechnology"}},
        {"term": {"location.locality": "san francisco"}}
      ]
    }
  }
}

# Create a parameters JSON object
PARAMS = {
    "query": ES_QUERY,
    "size": 100
}

# Pass the parameters object to the Company Search API
response = CLIENT.company.search(**PARAMS).json()

# Check for successful response
if response["status"] == 200:
  
  data = response['data']
  
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The eager beaver was not so eager. See error and try again.")
  print("error:", response)
import json

# See https://github.com/peopledatalabs/peopledatalabs-python
from peopledatalabs import PDLPY

# Create a client, specifying your API key
CLIENT = PDLPY(
    api_key="YOUR API KEY",
)

# https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/industry.txt
# for enumerated possible values of industry

# https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/job_company_size.txt
# for enumerated possible values of company sizes

# Create an SQL query
SQL_QUERY = \
f"""
  SELECT * FROM company
  WHERE size IN ('1-10', '11-50')
  AND industry = 'biotechnology'
  AND location.locality='san francisco';
"""

# Create a parameters JSON object
PARAMS = {
  'sql': SQL_QUERY,
  'size': 100
}

# Pass the parameters object to the Company Search API
response = CLIENT.company.search(**PARAMS).json()

# Check for successful response
if response["status"] == 200:
  
  data = response['data']
  
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The eager beaver was not so eager. See error and try again.")
  print("error:", response)
// See https://github.com/peopledatalabs/peopledatalabs-js
import PDLJS from 'peopledatalabs';

import fs from 'fs';

// Create a client, specifying your API key
const PDLJSClient = new PDLJS({ apiKey: "YOUR API KEY" });

// Create an Elasticsearch query
const esQuery = {
  query: {
    bool: {
      must:[
        {"terms": {"size": ["1-10", "11-50"]}},
        {"term": {"industry" : "biotechnology"}},
        {"term": {"location.locality": "san francisco"}}
      ]
    }
  }
}

// Create a parameters JSON object
const params = {
  searchQuery: esQuery, 
  size: 100,
}

// Pass the parameters object to the Company Search API
PDLJSClient.company.search.elastic(params).then((data) => {
    // Write out each profile found to file, one JSON record per line
    fs.writeFile("my_pdl_search.jsonl", data.data.map((record) => JSON.stringify(record)).join("\n"), (err) => {
        if (err) throw err;
    });
    console.log(`Successfully grabbed ${data.data.length} records from PDL.`);
    console.log(`${data["total"]} total PDL records exist matching this query.`)
}).catch((error) => {
    console.log("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
    console.log(error);
});
// See https://github.com/peopledatalabs/peopledatalabs-js
import PDLJS from 'peopledatalabs';

import fs from 'fs';

// Create a client, specifying your API key
const PDLJSClient = new PDLJS({ apiKey: "YOUR API KEY" });

// Create an SQL query
const sqlQuery = `SELECT * FROM company
                    WHERE size IN ('1-10', '11-50')
                    AND industry = 'biotechnology'
                    AND location.locality='san francisco';`;

// Create a parameters JSON object
const params = {
    searchQuery: sqlQuery, 
    size: 100
}

// Pass the parameters object to the Company Search API
PDLJSClient.company.search.sql(params).then((data) => {
    // Write out each profile found to file, one JSON record per line
    fs.writeFile("my_pdl_search.jsonl", data.data.map((record) => JSON.stringify(record)).join("\n"), (err) => {
        if (err) throw err;
    });
    console.log(`Successfully grabbed ${data.data.length} records from PDL.`);
    console.log(`${data["total"]} total PDL records exist matching this query.`)
}).catch((error) => {
    console.log("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
    console.log(error);
});
require 'json'

# See https://github.com/peopledatalabs/peopledatalabs-ruby
require 'peopledatalabs'

# Set your API key
Peopledatalabs.api_key = 'YOUR API KEY'

# https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/industry.txt
# for enumerated possible values of industry

# https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/job_company_size.txt
# for enumerated possible values of company sizes

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"terms": {"size": ["1-10", "11-50"]}},
        {"term": {"industry": "biotechnology"}},
        {"term": {"location.locality": "san francisco"}}
      ]
    }
  }
}

# Pass parameters to the Company Search API
response = Peopledatalabs::Search.company(searchType: 'elastic', query: ES_QUERY, size: 100, pretty: true)

# Check for successful response
if response['status'] == 200
    data = response['data']
    # Write out each profile found to file
    File.open("my_pdl_search.jsonl", "w") do |out|
        data.each { |record| out.write(JSON.dump(record) + "\n") }
    end
    puts "Successfully grabbed #{data.length()} records from PDL."
    puts "#{response['total']} total PDL records exist matching this query."
else
    puts "NOTE: The carrier pigeons lost motivation in flight. See error and try again."
    puts "Error: #{response}"
end
require 'json'

# See https://github.com/peopledatalabs/peopledatalabs-ruby
require 'peopledatalabs'

# Set your API key
Peopledatalabs.api_key = 'YOUR API KEY'

# https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/industry.txt
# for enumerated possible values of industry

# https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/job_company_size.txt
# for enumerated possible values of company sizes

# Create an SQL query
SQL_QUERY = <<~SQL
  SELECT * FROM company
  WHERE size IN ('1-10', '11-50')
  AND industry = 'biotechnology'
  AND location.locality='san francisco';
SQL

# Pass parameters to the Company Search API
response = Peopledatalabs::Search.company(searchType: 'sql', query: SQL_QUERY, size: 100, pretty: true)

# Check for successful response
if response['status'] == 200
    data = response['data']
    # Write out each profile found to file
    File.open("my_pdl_search.jsonl", "w") do |out|
        data.each { |record| out.write(JSON.dump(record) + "\n") }
    end
    puts "Successfully grabbed #{data.length()} records from PDL."
    puts "#{response['total']} total PDL records exist matching this query."
else
    puts "NOTE: The carrier pigeons lost motivation in flight. See error and try again."
    puts "Error: #{response}"
end
package main

import (
    "fmt"
    "os"
    "encoding/json"
    "context"
)

// See https://github.com/peopledatalabs/peopledatalabs-go
import (
    pdl "github.com/peopledatalabs/peopledatalabs-go"
    pdlmodel "github.com/peopledatalabs/peopledatalabs-go/model"
)

func main() {
    // Set your API key
    apiKey := "YOUR API KEY"
    // Set API key as an environment variable
    // apiKey := os.Getenv("API_KEY")

    // Create a client, specifying your API key
    client := pdl.New(apiKey)

    // https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/industry.txt
    // for enumerated possible values of industry

    // https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/job_company_size.txt
    // for enumerated possible values of company sizes

    // Create an Elasticsearch query
    elasticSearchQuery := map[string]interface{} {
        "query": map[string]interface{} {
            "bool": map[string]interface{} {
                "must": []map[string]interface{} {
                    {"terms": map[string]interface{}{"size": []string{"1-10", "11-50"}}},
                    {"term": map[string]interface{}{"industry": "biotechnology"}},
                    {"term": map[string]interface{}{"location.locality": "san francisco"}},
                },
            },
        },
    }

    // Create a parameters JSON object
    params := pdlmodel.SearchParams {
        BaseParams: pdlmodel.BaseParams {
            Size: 100,
        },
        SearchBaseParams: pdlmodel.SearchBaseParams {
            Query: elasticSearchQuery,
        },
    }
    
    // Pass the parameters object to the Company Search API
    response, err := client.Company.Search(context.Background(), params)
    // Check for successful response
    if err == nil {
        data := response.Data
        // Create file
        out, outErr := os.Create("my_pdl_search.jsonl")
        // Only close the file if it was created successfully
        if outErr == nil {
            defer out.Close()
            for i := range data {
                // Convert each profile found to JSON
                record, jsonErr := json.Marshal(data[i])
                // Write out each profile to file
                if jsonErr == nil {
                    out.WriteString(string(record) + "\n")
                }
            }
            out.Sync()
        }
        fmt.Printf("Successfully grabbed %d records from pdl.\n", len(data))
        fmt.Printf("%d total PDL records exist matching this query.\n", response.Total)
    } else {
        fmt.Println("NOTE: The eager beaver was not so eager. See error and try again.")
        fmt.Println("Error:", err)
    } 
}
package main

import (
    "fmt"
    "os"
    "encoding/json"
    "context"
)

// See https://github.com/peopledatalabs/peopledatalabs-go
import (
    pdl "github.com/peopledatalabs/peopledatalabs-go"
    pdlmodel "github.com/peopledatalabs/peopledatalabs-go/model"
)

func main() {
    // Set your API key
    apiKey := "YOUR API KEY"
    // Set API key as an environment variable
    // apiKey := os.Getenv("API_KEY")

    // Create a client, specifying your API key
    client := pdl.New(apiKey)

    // https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/industry.txt
    // for enumerated possible values of industry

    // https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/job_company_size.txt
    // for enumerated possible values of company sizes

    // Create an SQL query
    sqlQuery := "SELECT * FROM company" +
        " WHERE size IN ('1-10', '11-50')" +
        " AND industry = 'biotechnology'" +
        " AND location.locality='san francisco';"

    // Create a parameters JSON object
    params := pdlmodel.SearchParams {
        BaseParams: pdlmodel.BaseParams {
            Size: 100,
            Pretty: true,
        },
        SearchBaseParams: pdlmodel.SearchBaseParams {
            SQL: sqlQuery,
        },
    }
    
    // Pass the parameters object to the Company Search API
    response, err := client.Company.Search(context.Background(), params)
    // Check for successful response
    if err == nil {
        data := response.Data
        // Create file
        out, outErr := os.Create("my_pdl_search.jsonl")
        // Only close the file if it was created successfully
        if outErr == nil {
            defer out.Close()
            for i := range data {
                // Convert each profile found to JSON
                record, jsonErr := json.Marshal(data[i])
                // Write out each profile to file
                if jsonErr == nil {
                    out.WriteString(string(record) + "\n")
                }
            }
            out.Sync()
        }
        fmt.Printf("Successfully grabbed %d records from PDL.\n", len(data))
        fmt.Printf("%d total PDL records exist matching this query.\n", response.Total)
    } else {
        fmt.Println("NOTE: The eager beaver was not so eager. See error and try again.")
        fmt.Println("Error:", err)
    } 
}
import requests, json

# Set your API key
API_KEY = "YOUR API KEY"

# Set the Company Search API URL
PDL_URL = "https://api.peopledatalabs.com/v5/company/search"

# Set headers
HEADERS = {
  "Content-Type": "application/json",
  "X-api-key": API_KEY
}

# https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/industry.txt
# for enumerated possible values of industry

# https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/job_company_size.txt
# for enumerated possible values of company sizes

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"terms": {"size": ["1-10", "11-50"]}},
        {"term": {"industry" : "biotechnology"}},
        {"term": {"location.locality": "san francisco"}}
      ]
    }
  }
}

# Create a parameters JSON object
PARAMS = {
    "query": json.dumps(ES_QUERY),
    "size": 100
}

# Pass the parameters object to the Company Search API
response = requests.get(
    PDL_URL,
    headers=HEADERS,
    params=PARAMS
).json()

# Check for successful response
if response["status"] == 200:
  
  data = response['data']
  
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")

  print(f"Successfully grabbed {len(response['data'])} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The eager beaver was not so eager. See error and try again.")
  print("error:", response)
import requests, json

# Set your API key
API_KEY = "YOUR API KEY"

# Set the Company Search API URL
PDL_URL = "https://api.peopledatalabs.com/v5/company/search"

# Set headers
HEADERS = {
  "Content-Type": "application/json",
  "X-api-key": API_KEY
}

# https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/industry.txt
# for enumerated possible values of industry

# https://pdl-prod-schema.s3-us-west-2.amazonaws.com/14.0/enums/job_company_size.txt
# for enumerated possible values of company sizes

# Create an SQL query
SQL_QUERY = \
f"""
  SELECT * FROM company
  WHERE size IN ('1-10', '11-50')
  AND industry = 'biotechnology'
  AND location.locality='san francisco';
"""

# Create a parameters JSON object
PARAMS = {
  'sql': SQL_QUERY,
  'size': 100
}

# Pass the parameters object to the Company Search API
response = requests.get(
  PDL_URL,
  headers=HEADERS,
  params=PARAMS
).json()

# Check for successful response
if response["status"] == 200:
  
  data = response['data']
  
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")

  print(f"Successfully grabbed {len(response['data'])} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The eager beaver was not so eager. See error and try again.")
  print("error:", response)

Bulk Retrieval

"I want to find all automotive companies in the Detroit area and save them to a CSV file."

🚧

High Credit Usage Code Below

The code example below retrieves all the company profiles in a metro area and is meant primarily to demonstrate the use of the scroll_token parameter when requesting large numbers of records. It is illustrative rather than production-ready: it can expend a lot of credits and has only minimal error handling. The MAX_NUM_RECORDS_LIMIT parameter in the example sets the maximum number of profiles that you will retrieve (and therefore the maximum number of credits that you will expend), so set it accordingly when testing this example.
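The core of the scrolling pattern is short: issue the same search repeatedly, passing back the scroll_token from each response, until a response no longer contains one. A stripped-down sketch of just that loop (no record limit, rate-limit pause, or error handling; the full examples below add all three):

# Assumes CLIENT and ES_QUERY are defined as in the full example below
all_records = []
params = {'query': ES_QUERY, 'size': 100}

while True:
    response = CLIENT.company.search(**params).json()
    if response['status'] != 200:
        break
    all_records.extend(response['data'])
    # A response without a scroll_token means there is nothing left to scroll
    if 'scroll_token' not in response:
        break
    params['scroll_token'] = response['scroll_token']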

import json, time, csv

# See https://github.com/peopledatalabs/peopledatalabs-python
from peopledatalabs import PDLPY

# Create a client, specifying your API key
CLIENT = PDLPY(
    api_key="YOUR API KEY",
)

# Limit the number of records to pull (to prevent accidentally using 
# more credits than expected when testing out this code).
MAX_NUM_RECORDS_LIMIT = 150 # The maximum number of records to retrieve
USE_MAX_NUM_RECORDS_LIMIT = True # Set to False to pull all available records

# Create an Elasticsearch query
ES_QUERY = {
  'query': {
    'bool': {
      'must': [
        {'term': {'industry': "automotive"}},
        {'term': {'location.metro': "detroit, michigan"}}
      ]
    }
  }
}

# Create a parameters JSON object
PARAMS = {
  'query': ES_QUERY,
  'size': 100,
  'pretty': True
}

# Pull all results in multiple batches
batch = 1
# Store all records retrieved in an array
all_records = []
# Time the process
start_time = time.time()
found_all_records = False
continue_scrolling = True

# While still scrolling through data and still records to be found
while continue_scrolling and not found_all_records: 

  # Check if we reached the maximum number of records we want
  if USE_MAX_NUM_RECORDS_LIMIT:
    num_records_to_request = MAX_NUM_RECORDS_LIMIT - len(all_records)
    # Adjust size parameter
    PARAMS['size'] = max(0, min(100, num_records_to_request))
    # Check if MAX_NUM_RECORDS_LIMIT reached
    if num_records_to_request == 0:
      print(f"Stopping - reached maximum number of records to pull "
            f"[MAX_NUM_RECORDS_LIMIT = {MAX_NUM_RECORDS_LIMIT}].")
      break

  # Pass the parameters object to the Company Search API
  response = CLIENT.company.search(**PARAMS).json()

  # Check for successful response
  if response['status'] == 200:
    # Add records retrieved to the records array
    all_records.extend(response['data'])
    print(f"Retrieved {len(response['data'])} records in batch {batch} "
          f"- {response['total'] - len(all_records)} records remaining.")
  else:
    print(f"Error retrieving some records:\n\t"
          f"[{response['status']} - {response['error']['type']}] "
          f"{response['error']['message']}")
  
  # Get scroll_token from response if exists and store it in parameters object
  if 'scroll_token' in response:
    PARAMS['scroll_token'] = response['scroll_token']
  else:
    continue_scrolling = False
    print(f"Unable to continue scrolling.")

  batch += 1
  found_all_records = (len(all_records) == response['total'])
  time.sleep(6) # avoid hitting rate limit thresholds
 
# Calculate time required to process batches
end_time = time.time()
runtime = end_time - start_time
        
print(f"Successfully recovered {len(all_records)} profiles in "
      f"{batch} batches [{round(runtime, 2)} seconds].")

# Save profiles to CSV (utility function)
def save_profiles_to_csv(profiles, filename, fields=[], delim=','):
  # Define header fields
  if fields == [] and len(profiles) > 0:
      fields = profiles[0].keys()
  # Write CSV file
  with open(filename, 'w') as csvfile:
    writer = csv.writer(csvfile, delimiter=delim)
    # Write header
    writer.writerow(fields)
    # Write body
    count = 0
    for profile in profiles:
      writer.writerow([ profile[field] for field in fields ])
      count += 1
  print(f"Wrote {count} lines to: '{filename}'.")

# Use utility function to save all records retrieved to CSV   
csv_header_fields = ['name', 'website', 'linkedin_url',
                     'size', 'tags']
csv_filename = "all_company_profiles.csv"
save_profiles_to_csv(all_records, csv_filename, csv_header_fields)
import json, time, csv

# See https://github.com/peopledatalabs/peopledatalabs-python
from peopledatalabs import PDLPY

# Create a client, specifying your API key
CLIENT = PDLPY(
    api_key="YOUR API KEY",
)

# Limit the number of records to pull (to prevent accidentally using 
# more credits than expected when testing out this code).
MAX_NUM_RECORDS_LIMIT = 150 # The maximum number of records to retrieve
USE_MAX_NUM_RECORDS_LIMIT = True # Set to False to pull all available records

# Create an SQL query
SQL_QUERY = \
f"""
  SELECT * FROM company
  WHERE industry = 'automotive'
  AND location.metro='detroit, michigan';
"""

# Create a parameters JSON object
PARAMS = {
  'sql': SQL_QUERY,
  'size': 100,
  'pretty': True
}

# Pull all results in multiple batches
batch = 1
# Store all records retrieved in an array
all_records = []
# Time the process
start_time = time.time()
found_all_records = False
continue_scrolling = True

# While still scrolling through data and still records to be found
while continue_scrolling and not found_all_records: 

  # Check if we reached the maximum number of records we want
  if USE_MAX_NUM_RECORDS_LIMIT:
    num_records_to_request = MAX_NUM_RECORDS_LIMIT - len(all_records)
    # Adjust size parameter
    PARAMS['size'] = max(0, min(100, num_records_to_request))
    # Check if MAX_NUM_RECORDS_LIMIT reached
    if num_records_to_request == 0:
      print(f"Stopping - reached maximum number of records to pull "
            f"[MAX_NUM_RECORDS_LIMIT = {MAX_NUM_RECORDS_LIMIT}].")
      break

  # Pass the parameters object to the Company Search API
  response = CLIENT.company.search(**PARAMS).json()

  # Check for successful response
  if response['status'] == 200:
    # Add records retrieved to the records array
    all_records.extend(response['data'])
    print(f"Retrieved {len(response['data'])} records in batch {batch} "
          f"- {response['total'] - len(all_records)} records remaining.")
  else:
    print(f"Error retrieving some records:\n\t"
          f"[{response['status']} - {response['error']['type']}] "
          f"{response['error']['message']}")
  
  # Get scroll_token from response if exists and store it in parameters object
  if 'scroll_token' in response:
    PARAMS['scroll_token'] = response['scroll_token']
  else:
    continue_scrolling = False
    print(f"Unable to continue scrolling.")

  batch += 1
  found_all_records = (len(all_records) == response['total'])
  time.sleep(6) # avoid hitting rate limit thresholds
 
# Calculate time required to process batches
end_time = time.time()
runtime = end_time - start_time
        
print(f"Successfully recovered {len(all_records)} profiles in "
      f"{batch} batches [{round(runtime, 2)} seconds].")

# Save profiles to CSV (utility function)
def save_profiles_to_csv(profiles, filename, fields=[], delim=','):
  # Define header fields
  if fields == [] and len(profiles) > 0:
      fields = profiles[0].keys()
  # Write CSV file
  with open(filename, 'w') as csvfile:
    writer = csv.writer(csvfile, delimiter=delim)
    # Write header
    writer.writerow(fields)
    # Write body
    count = 0
    for profile in profiles:
      writer.writerow([ profile[field] for field in fields ])
      count += 1
  print(f"Wrote {count} lines to: '{filename}'.")

# Use utility function to save all records retrieved to CSV   
csv_header_fields = ['name', 'website', 'linkedin_url',
                     'size', 'tags']
csv_filename = "all_company_profiles.csv"
save_profiles_to_csv(all_records, csv_filename, csv_header_fields)
// See https://github.com/peopledatalabs/peopledatalabs-js
import PDLJS from 'peopledatalabs';

// See https://www.npmjs.com/package/csv-writer
import * as csvwriter from 'csv-writer';

// Create a client, specifying your API key
const PDLJSClient = new PDLJS({ apiKey: "YOUR API KEY" });

// Limit the number of records to pull (to prevent accidentally using 
// more credits than expected when testing out this code).
const maxNumRecordsLimit = 150;     // The maximum number of records to retrieve
const useMaxNumRecordsLimit = true; // Set to false to pull all available records

// Create an Elasticsearch query
const esQuery = {
  query: {
    bool: {
      must:[
        {'term': {'industry': "automotive"}},
        {'term': {'location.metro': "detroit, michigan"}}
      ]
    }
  }
}

// Create a parameters JSON object
var params = {
  searchQuery: esQuery, 
  size: 100,
  scroll_token: null,
  pretty: true
}

// Pull all results in multiple batches
var batch = 1;

// Store all records retrieved in an array
var allRecords = [];
// Time the process
var startTime = Date.now();
var foundAllRecords = false;
var continueScrolling = true;
var numRetrieved = 0;
// Queue parameter objects in order to iterate through batches
var paramQueue = [];
// The current scroll_token
var scrollToken = null;
var numRecordsToRequest = 100;

while (numRecordsToRequest > 0) { 

    // Check if we reached the maximum number of records we want
    if (useMaxNumRecordsLimit) {
        numRecordsToRequest = maxNumRecordsLimit - numRetrieved;
        // Adjust size parameter
        params.size = Math.max(0, Math.min(100, numRecordsToRequest));
        numRetrieved += params.size;
        // Add batch to the parameter queue
        if (params.size > 0) {       
            paramQueue.push(JSON.parse(JSON.stringify(params)));
        }
    } else {
        break;
    }
}

// Run initial batch
runBatch();

// Retrieve records associated with a batch
function runBatch() {
    // Get the parameters for the current batch
    let currParams = useMaxNumRecordsLimit ? paramQueue[batch-1] : params;
    // Set the scroll_token from the previous batch
    currParams.scroll_token = scrollToken;
    batch++;
                
    // Pass the current parameters object to the Company Search API
    PDLJSClient.company.search.elastic(currParams).then((data) => {
        // Add records retrieved to the records array
        Array.prototype.push.apply(allRecords, data.data);
            
        // Store the scroll_token if exists
        if (data['scroll_token']) {
            scrollToken = data['scroll_token'];
        } else {
            continueScrolling = false;
            console.log("Unable to continue scrolling.");
        }
            
        foundAllRecords = (allRecords.length == data['total']);
            
        console.log(`Retrieved ${data.data.length} records in batch ${(batch-1)}` +
            ` - ${(data['total'] - allRecords.length)} records remaining.`);
            
        // Run next batch recursively, if any
        if (!foundAllRecords && continueScrolling && (batch <= paramQueue.length || !useMaxNumRecordsLimit)) {
            runBatch();
        } else {
            console.log(`Stopping - reached maximum number of records to pull [maxNumRecordsLimit = ` +
                `${maxNumRecordsLimit}].`);  
                
            // Calculate time required to process batches
            let endTime = Date.now();
            let runTime = endTime - startTime;
            console.log (`Successfully recovered ${allRecords.length} profiles in ` +
                `${(batch-1)} batches [${Math.round(runTime/1000)} seconds].`);
                
            // Set CSV fields
            let csvHeaderFields = [
                {id: "work_email", title: "work_email"},
                {id: "full_name", title: "full_name"}, 
                {id: "linkedin_url", title: "linkedin_url"},
                {id: "size", title: "size"},
                {id: "tags", title: "tags"}
            ];
            let csvFilename = "all_company_profiles.csv";
            // Write records array to CSV file
            saveProfilesToCSV(allRecords, csvFilename, csvHeaderFields);           
        }
    }).catch((error) => {
        console.log(error);
    });

}

// Write CSV file using csv-writer (https://www.npmjs.com/package/csv-writer) 
// $ npm i csv-writer
function saveProfilesToCSV(profiles, filename, fields) {

    // Create CSV file
    const createCsvWriter = csvwriter.createObjectCsvWriter;
    const csvWriter = createCsvWriter({
        path: filename,
        header: fields
    });
    
    let data = [];
    // Iterate through records array
    for (let i = 0; i < profiles.length; i++) {
        let record = profiles[i];
        data[i] = {};
        // Store requested fields
        for (let field in fields) {
            data[i][fields[field].id] = record[fields[field].id];    
        }
    }

    // Write data to CSV file
    csvWriter
        .writeRecords(data)
        .then(()=> console.log('The CSV file was written successfully.'));
}
// See https://github.com/peopledatalabs/peopledatalabs-js
import PDLJS from 'peopledatalabs';

// See https://www.npmjs.com/package/csv-writer
import * as csvwriter from 'csv-writer';

// Create a client, specifying your API key
const PDLJSClient = new PDLJS({ apiKey: "YOUR API KEY" });

// Limit the number of records to pull (to prevent accidentally using 
// more credits than expected when testing out this code).
const maxNumRecordsLimit = 150;     // The maximum number of records to retrieve
const useMaxNumRecordsLimit = true; // Set to false to pull all available records

// Create an SQL query
const sqlQuery = `SELECT * FROM company
                    WHERE industry = 'automotive'
                    AND location.metro='detroit, michigan';`;

// Create a parameters JSON object
var params = {
  searchQuery: sqlQuery, 
  size: 100,
  scroll_token: null,
  pretty: true
}

// Pull all results in multiple batches
var batch = 1;

// Store all records retrieved in an array
var allRecords = [];
// Time the process
var startTime = Date.now();
var foundAllRecords = false;
var continueScrolling = true;
var numRetrieved = 0;
// Queue parameter objects in order to iterate through batches
var paramQueue = [];
// The current scroll_token
var scrollToken = null;
var numRecordsToRequest = 100;

while (numRecordsToRequest > 0) { 

    // Check if we reached the maximum number of records we want
    if (useMaxNumRecordsLimit) {
        numRecordsToRequest = maxNumRecordsLimit - numRetrieved;
        // Adjust size parameter
        params.size = Math.max(0, Math.min(100, numRecordsToRequest));
        numRetrieved += params.size;
        // Add batch to the parameter queue
        if (params.size > 0) {       
            paramQueue.push(JSON.parse(JSON.stringify(params)));
        }
    } else {
        break;
    }
}

// Run initial batch
runBatch();

// Retrieve records associated with a batch
function runBatch() {
    // Get the parameters for the current batch
    let currParams = useMaxNumRecordsLimit ? paramQueue[batch-1] : params;
    // Set the scroll_token from the previous batch
    currParams.scroll_token = scrollToken;
    batch++;
                
    // Pass the current parameters object to the Company Search API
    PDLJSClient.company.search.sql(currParams).then((data) => {
        // Add records retrieved to the records array
        Array.prototype.push.apply(allRecords, data.data);
            
        // Store the scroll_token if exists
        if (data['scroll_token']) {
            scrollToken = data['scroll_token'];
        } else {
            continueScrolling = false;
            console.log("Unable to continue scrolling.");
        }
            
        foundAllRecords = (allRecords.length == data['total']);
            
        console.log(`Retrieved ${data.data.length} records in batch ${(batch-1)}` +
            ` - ${(data['total'] - allRecords.length)} records remaining.`);
            
        // Run next batch recursively, if any
        if (!foundAllRecords && continueScrolling && (batch <= paramQueue.length || !useMaxNumRecordsLimit)) {
            runBatch();
        } else {
            console.log(`Stopping - reached maximum number of records to pull [maxNumRecordsLimit = ` +
                `${maxNumRecordsLimit}].`);  
                
            // Calculate time required to process batches
            let endTime = Date.now();
            let runTime = endTime - startTime;
            console.log (`Successfully recovered ${allRecords.length} profiles in ` +
                `${(batch-1)} batches [${Math.round(runTime/1000)} seconds].`);
                
            // Set CSV fields
            let csvHeaderFields = [
                {id: "work_email", title: "work_email"},
                {id: "full_name", title: "full_name"}, 
                {id: "linkedin_url", title: "linkedin_url"},
                {id: "size", title: "size"},
                {id: "tags", title: "tags"}
            ];
            let csvFilename = "all_company_profiles.csv";
            // Write records array to CSV file
            saveProfilesToCSV(allRecords, csvFilename, csvHeaderFields);           
        }
    }).catch((error) => {
        console.log(error);
    });

}

// Write CSV file using csv-writer (https://www.npmjs.com/package/csv-writer)
// $ npm i csv-writer
function saveProfilesToCSV(profiles, filename, fields) {

    // Create CSV file
    const createCsvWriter = csvwriter.createObjectCsvWriter;
    const csvWriter = createCsvWriter({
        path: filename,
        header: fields
    });
    
    let data = [];
    for (let i = 0; i < profiles.length; i++) {
        let record = profiles[i];
        data[i] = {};
        // Store requested fields
        for (let field in fields) {
            data[i][fields[field].id] = record[fields[field].id];    
        }
    }

    // Write data to CSV file
    csvWriter
        .writeRecords(data)
        .then(()=> console.log('The CSV file was written successfully.'));
}
require 'json'
require 'csv'

# See https://github.com/peopledatalabs/peopledatalabs-ruby
require 'peopledatalabs'

# Set your API key
Peopledatalabs.api_key = 'YOUR API KEY'

# Limit the number of records to pull (to prevent accidentally using 
# more credits than expected when testing out this code).
MAX_NUM_RECORDS_LIMIT = 150 # The maximum number of records to retrieve
USE_MAX_NUM_RECORDS_LIMIT = true # Set to false to pull all available records

# Create an Elasticsearch query
ES_QUERY = {
  'query': {
    'bool': {
      'must': [
        {'term': {'industry': "automotive"}},
        {'term': {'location.metro': "detroit, michigan"}}
      ]
    }
  }
}

# Pull all results in multiple batches
batch = 1
# Store all records retrieved in an array
all_records = []
# Time the process
start_time = Time.now
found_all_records = false
continue_scrolling = true
scroll_token = nil

# While still scrolling through data and still records to be found
while continue_scrolling && !found_all_records do 

  # Check if we reached the maximum number of records we want
  if USE_MAX_NUM_RECORDS_LIMIT
    num_records_to_request = MAX_NUM_RECORDS_LIMIT - all_records.length()
    # Adjust size parameter
    size = [0, [100, num_records_to_request].min].max
    # Check if MAX_NUM_RECORDS_LIMIT reached
    if num_records_to_request == 0
      puts "Stopping - reached maximum number of records to pull "
      puts "[MAX_NUM_RECORDS_LIMIT = #{MAX_NUM_RECORDS_LIMIT}]."
      break
    end
  end

  # Pass parameters to the Company Search API
  response = Peopledatalabs::Search.company(searchType: 'elastic', query: ES_QUERY, size: size, scroll_token: scroll_token, pretty: true)

  # Check for successful response
  if response['status'] == 200
    # Add records retrieved to the records array
    all_records += response['data']
    puts "Retrieved #{response['data'].length()} records in batch #{batch} "
    puts  "- #{response['total'] - all_records.length()} records remaining."
  else
    puts "Error retrieving some records:\n\t"
    puts "[#{response['status']} - #{response['error']['type']}] "
    puts response['error']['message']
  end
  
  # Get scroll_token from response if exists and store it
  if response.key?('scroll_token')
    scroll_token = response['scroll_token']
  else
    continue_scrolling = false
    puts "Unable to continue scrolling."
  end

  batch += 1
  found_all_records = (all_records.length() == response['total'])
  sleep(6) # avoid hitting rate limit thresholds
end

# Calculate time required to process batches
end_time = Time.now
runtime = end_time - start_time
        
puts "Successfully recovered #{all_records.length()} profiles in "
puts "#{batch} batches [#{runtime.round(2)} seconds]."

# Save profiles to CSV (utility function)
def save_profiles_to_csv(profiles, filename, fields=[], delim=',')
  # Define header fields
  if fields == [] && profiles.length() > 0
      fields = profiles[0].keys
  end
    
  count = 0
  # Write CSV file
  CSV.open(filename, 'w', col_sep: delim) do |writer|
    # Write header
    writer << fields
    # Write body
    profiles.each do |profile|
      record = []
      fields.each do |field|
        record << profile[field]
      end
      writer << record
      count += 1
    end
  end
  puts "Wrote #{count} lines to: '#{filename}'."
end

# Use utility function to save profiles to CSV   
csv_header_fields = ['name', 'website', 'linkedin_url',
                     'size', 'tags']
csv_filename = "all_company_profiles.csv"
save_profiles_to_csv(all_records, csv_filename, csv_header_fields)
require 'json'
require 'csv'

# See https://github.com/peopledatalabs/peopledatalabs-ruby
require 'peopledatalabs'

# Set your API key
Peopledatalabs.api_key = 'YOUR API KEY'

# Limit the number of records to pull (to prevent accidentally using 
# more credits than expected when testing out this code).
MAX_NUM_RECORDS_LIMIT = 150 # The maximum number of records to retrieve
USE_MAX_NUM_RECORDS_LIMIT = true # Set to false to pull all available records

# Create an SQL query
SQL_QUERY = <<~SQL
  SELECT * FROM company
  WHERE industry = 'automotive'
  AND location.metro='detroit, michigan';
SQL

# Pull all results in multiple batches
batch = 1
# Store all records retrieved in an array
all_records = []
# Time the process
start_time = Time.now
found_all_records = false
continue_scrolling = true
scroll_token = nil

# While still scrolling through data and still records to be found
while continue_scrolling && !found_all_records do 

  # Check if we reached the maximum number of records we want
  if USE_MAX_NUM_RECORDS_LIMIT
    num_records_to_request = MAX_NUM_RECORDS_LIMIT - all_records.length()
    # Adjust size parameter
    size = [0, [100, num_records_to_request].min].max
    # Check if MAX_NUM_RECORDS_LIMIT reached
    if num_records_to_request == 0
      puts "Stopping - reached maximum number of records to pull "
      puts "[MAX_NUM_RECORDS_LIMIT = #{MAX_NUM_RECORDS_LIMIT}]."
      break
    end
  end

  # Pass parameters to the Company Search API
  response = Peopledatalabs::Search.company(searchType: 'sql', query: SQL_QUERY, size: size, scroll_token: scroll_token, pretty: true)

  # Check for successful response
  if response['status'] == 200
    # Add records retrieved to the records array
    all_records += response['data']
    puts "Retrieved #{response['data'].length()} records in batch #{batch} "
    puts  "- #{response['total'] - all_records.length()} records remaining."
  else
    puts "Error retrieving some records:\n\t"
    puts "[#{response['status']} - #{response['error']['type']}] "
    puts response['error']['message']
  end
  
  # Get scroll_token from response if exists and store it
  if response.key?('scroll_token')
    scroll_token = response['scroll_token']
  else
    continue_scrolling = false
    puts "Unable to continue scrolling."
  end

  batch += 1
  found_all_records = (all_records.length() == response['total'])
  sleep(6) # avoid hitting rate limit thresholds
end

# Calculate time required to process batches
end_time = Time.now
runtime = end_time - start_time
        
puts "Successfully recovered #{all_records.length()} profiles in "
puts "#{batch} batches [#{runtime.round(2)} seconds]."

# Save profiles to CSV (utility function)
def save_profiles_to_csv(profiles, filename, fields=[], delim=',')
  # Define header fields
  if fields == [] && profiles.length() > 0
      fields = profiles[0].keys
  end
    
  count = 0
  # Write CSV file
  CSV.open(filename, 'w', col_sep: delim) do |writer|
    # Write header
    writer << fields
    # Write body
    profiles.each do |profile|
      record = []
      fields.each do |field|
        record << profile[field]
      end
      writer << record
      count += 1
    end
  end
  puts "Wrote #{count} lines to: '#{filename}'."
end

# Use utility function to save profiles to CSV 
csv_header_fields = ['name', 'website', 'linkedin_url',
                     'size', 'tags']
csv_filename = "all_company_profiles.csv"
save_profiles_to_csv(all_records, csv_filename, csv_header_fields)
package main

import (
    "fmt"
    "time"
    "os"
    "math"
    "reflect"
    "encoding/json"
    "encoding/csv"
    "context"
)

// See https://github.com/peopledatalabs/peopledatalabs-go
import (
    pdl "github.com/peopledatalabs/peopledatalabs-go"
    pdlmodel "github.com/peopledatalabs/peopledatalabs-go/model"
)

func main() {
    // Set your API key
    apiKey := "YOUR API KEY"
    // Set API key as environmental variable
    // apiKey := os.Getenv("API_KEY")

    // Create a client, specifying your API key
    client := pdl.New(apiKey)

    // Limit the number of records to pull (to prevent accidentally using 
    // more credits than expected when testing out this code).
    const maxNumRecordsLimit = 150 // The maximum number of records to retrieve
    const useMaxNumRecordsLimit = true // Set to false to pull all available records

    // Create an Elasticsearch query
    elasticSearchQuery := map[string]interface{} {
        "query": map[string]interface{} {
            "bool": map[string]interface{} {
                "must": []map[string]interface{} {
                    {"term": map[string]interface{}{"industry": "automotive"}},
                    {"term": map[string]interface{}{"location.metro": "detroit, michigan"}},
                },
            },
        },
    }

    // Create a parameters JSON object
    p := pdlmodel.SearchParams {
        BaseParams: pdlmodel.BaseParams {
            Size: 50,
            Pretty: true,
        },
        SearchBaseParams: pdlmodel.SearchBaseParams {
            Query: elasticSearchQuery,
        },
    }

    // Pull all results in multiple batches
    batch := 1
    // Store all records retrieved in an array
    var allRecords []pdlmodel.Company
    // Time the process
    startTime := time.Now()
    foundAllRecords := false
    continueScrolling := true
    var numRecordsToRequest int
    
    // While still scrolling through data and still records to be found
    for continueScrolling && !foundAllRecords {
        // Check if we reached the maximum number of records we want
        if useMaxNumRecordsLimit {
            numRecordsToRequest = maxNumRecordsLimit - len(allRecords)
            // Adjust size parameter
            p.BaseParams.Size = (int) (math.Max(0.0, math.Min(50.0, (float64) (numRecordsToRequest))))
            // Check if MAX_NUM_RECORDS_LIMIT reached
            if numRecordsToRequest == 0 {
                fmt.Printf("Stopping - reached maximum number of records to pull " +
                           "[MAX_NUM_RECORDS_LIMIT = %d].\n", maxNumRecordsLimit)
                
                break
            }
        }
        
        // Pass the parameters object to the Company Search API
        response, err := client.Company.Search(context.Background(), p)
        
        // Check for successful response
        if err == nil {
            fmt.Printf("Retrieved %d records in batch %d - %d records remaining.\n", 
                       len(response.Data), batch, response.Total - len(allRecords))
        } else {
            fmt.Println("Error retrieving some records:\n\t",
                       err)
        }
        
        // Convert response to JSON
        var data map[string]interface{}
        jsonResponse, jsonErr := json.Marshal(response)
        if jsonErr == nil {
            json.Unmarshal(jsonResponse, &data)
            // Get scroll_token from response if exists and store it in parameters object
            if scrollToken, ok := data["scroll_token"]; ok {
                p.SearchBaseParams.ScrollToken = fmt.Sprintf("%v", scrollToken)
            } else {
                continueScrolling = false
                fmt.Println("Unable to continue scrolling.")
            }
            // Add records retrieved to the records array
            allRecords = append(allRecords, response.Data...)
        }
        
        batch++
        foundAllRecords = (len(allRecords) == response.Total)
        time.Sleep(6 * time.Second) // avoid hitting rate limit thresholds
    }
    
    // Calculate time required to process batches
    endTime := time.Now()
    runtime := endTime.Sub(startTime).Seconds()
        
    fmt.Printf("Successfully recovered %d profiles in %d batches [%d seconds].\n",
               len(allRecords), batch - 1, (int) (math.Round((float64) (runtime))))
    
    // Use utility function to save profiles to CSV    
    csvHeaderFields := []string{"name", "website", "linkedin_url",
                                "size", "tags"}
    csvFilename := "all_company_profiles.csv"
    saveProfilesToCsv(allRecords, csvFilename, csvHeaderFields, ",")
}

// Save profiles to CSV (utility function)
func saveProfilesToCsv(profiles []pdlmodel.Company, filename string, fields []string, delim string) {
    // Define header fields
    if fields == nil && len(profiles) > 0 {
        e := reflect.ValueOf(&(profiles[0])).Elem()
        for i := 0; i < e.NumField(); i++ {
            fields = append(fields, e.Type().Field(i).Name)
        }
    }
    
    // Write CSV file
    csvFile, err := os.Create(filename)
    if err == nil {
        csvwriter := csv.NewWriter(csvFile)
        // Use the requested delimiter and close the file when done
        csvwriter.Comma = []rune(delim)[0]
        defer csvFile.Close()
        defer csvwriter.Flush()
        // Write header
        csvwriter.Write(fields)
        // Write body
        count := 0
        for i := range profiles {
            var data map[string]interface{}
            jsonResponse, jsonErr := json.Marshal(profiles[i])
            if jsonErr == nil {
                json.Unmarshal(jsonResponse, &data)
                var record []string
                for j := range fields {
                    record = append(record, fmt.Sprintf("%v", data[fields[j]]))
                }
                csvwriter.Write(record)
                count++
            }
        }
        fmt.Printf("Wrote %d lines to: %s.\n", count, filename)
    }
}
package main

import (
    "fmt"
    "time"
    "os"
    "math"
    "reflect"
    "encoding/json"
    "encoding/csv"
    "context"
)

// See https://github.com/peopledatalabs/peopledatalabs-go
import (
    pdl "github.com/peopledatalabs/peopledatalabs-go"
    pdlmodel "github.com/peopledatalabs/peopledatalabs-go/model"
)

func main() {
    // Set your API key
    apiKey := "YOUR API KEY"
    // Set API key as environmental variable
    // apiKey := os.Getenv("API_KEY")

    // Create a client, specifying your API key
    client := pdl.New(apiKey)

    // Limit the number of records to pull (to prevent accidentally using 
    // more credits than expected when testing out this code).
    const maxNumRecordsLimit = 150 // The maximum number of records to retrieve
    const useMaxNumRecordsLimit = true // Set to false to pull all available records

    // Create an SQL query
    sqlQuery := "SELECT * FROM company" +
        " WHERE industry = 'automotive'" +
        " AND location.metro='detroit, michigan';"

    // Create a parameters JSON object
    p := pdlmodel.SearchParams {
        BaseParams: pdlmodel.BaseParams {
            Size: 50,
            Pretty: true,
        },
        SearchBaseParams: pdlmodel.SearchBaseParams {
            SQL: sqlQuery,
        },
    }

    // Pull all results in multiple batches
    batch := 1
    // Store all records retrieved in an array
    var allRecords []pdlmodel.Company
    // Time the process
    startTime := time.Now()
    foundAllRecords := false
    continueScrolling := true
    var numRecordsToRequest int
    
    // While still scrolling through data and still records to be found
    for continueScrolling && !foundAllRecords {
        // Check if we reached the maximum number of records we want
        if useMaxNumRecordsLimit {
            numRecordsToRequest = maxNumRecordsLimit - len(allRecords)
            // Adjust size parameter
            p.BaseParams.Size = (int) (math.Max(0.0, math.Min(50.0, (float64) (numRecordsToRequest))))
            // Check if MAX_NUM_RECORDS_LIMIT reached
            if numRecordsToRequest == 0 {
                fmt.Printf("Stopping - reached maximum number of records to pull " +
                           "[MAX_NUM_RECORDS_LIMIT = %d].\n", maxNumRecordsLimit)
                
                break
            }
        }
        
        // Pass the parameters object to the Company Search API
        response, err := client.Company.Search(context.Background(), p)
        
        // Check for successful response
        if err == nil {
            fmt.Printf("Retrieved %d records in batch %d - %d records remaining.\n", 
                       len(response.Data), batch, response.Total - len(allRecords))
        } else {
            fmt.Println("Error retrieving some records:\n\t",
                       err)
        }
        
        // Convert response to JSON
        var data map[string]interface{}
        jsonResponse, jsonErr := json.Marshal(response)
        if jsonErr == nil {
            json.Unmarshal(jsonResponse, &data)
            // Get scroll_token from response if exists and store it in parameters object
            if scrollToken, ok := data["scroll_token"]; ok {
                p.SearchBaseParams.ScrollToken = fmt.Sprintf("%v", scrollToken)
            } else {
                continueScrolling = false
                fmt.Println("Unable to continue scrolling.")
            }
            // Add records retrieved to the records array
            allRecords = append(allRecords, response.Data...)
        }
        
        batch++
        foundAllRecords = (len(allRecords) == response.Total)
        time.Sleep(6 * time.Second) // avoid hitting rate limit thresholds
    }
    
    // Calculate time required to process batches
    endTime := time.Now()
    runtime := endTime.Sub(startTime).Seconds()
        
    fmt.Printf("Successfully recovered %d profiles in %d batches [%d seconds].\n",
               len(allRecords), batch - 1, (int) (math.Round((float64) (runtime))))
    
    // Use utility function to save profiles to CSV   
    csvHeaderFields := []string{"name", "website", "linkedin_url",
                                "size", "tags"}
    csvFilename := "all_company_profiles.csv"
    saveProfilesToCsv(allRecords, csvFilename, csvHeaderFields, ",")
}

// Save profiles to CSV (utility function)
func saveProfilesToCsv(profiles []pdlmodel.Company, filename string, fields []string, delim string) {
    // Define header fields
    if fields == nil && len(profiles) > 0 {
        e := reflect.ValueOf(&(profiles[0])).Elem()
        for i := 0; i < e.NumField(); i++ {
            fields = append(fields, e.Type().Field(i).Name)
        }
    }
    
    // Write CSV file
    csvFile, err := os.Create(filename)
    if err == nil {
        csvwriter := csv.NewWriter(csvFile)
        // Use the requested delimiter and close the file when done
        csvwriter.Comma = []rune(delim)[0]
        defer csvFile.Close()
        defer csvwriter.Flush()
        // Write header
        csvwriter.Write(fields)
        // Write body
        count := 0
        for i := range profiles {
            var data map[string]interface{}
            jsonResponse, jsonErr := json.Marshal(profiles[i])
            if jsonErr == nil {
                json.Unmarshal(jsonResponse, &data)
                var record []string
                for j := range fields {
                    record = append(record, fmt.Sprintf("%v", data[fields[j]]))
                }
                csvwriter.Write(record)
                count++
            }
        }
        fmt.Printf("Wrote %d lines to: %s.\n", count, filename)
    }
}
import requests, json, time, csv

# Set your API key
API_KEY = "YOUR API KEY"

# Limit the number of records to pull (to prevent accidentally using 
# more credits than expected when testing out this code).
MAX_NUM_RECORDS_LIMIT = 150 # The maximum number of records to retrieve
USE_MAX_NUM_RECORDS_LIMIT = True # Set to False to pull all available records

# Set the Company Search API URL
PDL_URL = "https://api.peopledatalabs.com/v5/company/search"

# Set headers
HEADERS = {
  'Content-Type': "application/json",
  'X-api-key': API_KEY
}

# Create an Elasticsearch query
ES_QUERY = {
  'query': {
    'bool': {
      'must': [
        {'term': {'industry': "automotive"}},
        {'term': {'location.metro': "detroit, michigan"}}
      ]
    }
  }
}

# Create a parameters JSON object
PARAMS = {
  'query': json.dumps(ES_QUERY),
  'size': 100,
  'pretty': True
}

# Pull all results in multiple batches
batch = 1
# Store all records retrieved in an array
all_records = []
# Time the process
start_time = time.time()
found_all_records = False
continue_scrolling = True

# While still scrolling through data and still records to be found
while continue_scrolling and not found_all_records: 

  # Check if we reached the maximum number of records we want
  if USE_MAX_NUM_RECORDS_LIMIT:
    num_records_to_request = MAX_NUM_RECORDS_LIMIT - len(all_records)
    # Adjust size parameter
    PARAMS['size'] = max(0, min(100, num_records_to_request))
    # Check if MAX_NUM_RECORDS_LIMIT reached
    if num_records_to_request == 0:
      print(f"Stopping - reached maximum number of records to pull "
            f"[MAX_NUM_RECORDS_LIMIT = {MAX_NUM_RECORDS_LIMIT}].")
      break

  # Pass the parameters object to the Company Search API
  response = requests.get(
    PDL_URL,
    headers=HEADERS,
    params=PARAMS
  ).json()

  # Check for successful response
  if response['status'] == 200:
    # Add records retrieved to the records array
    all_records.extend(response['data'])
    print(f"Retrieved {len(response['data'])} records in batch {batch} "
          f"- {response['total'] - len(all_records)} records remaining.")
  else:
    print(f"Error retrieving some records:\n\t"
          f"[{response['status']} - {response['error']['type']}] "
          f"{response['error']['message']}")
  
  # Get scroll_token from response if exists and store it in parameters object
  if 'scroll_token' in response:
    PARAMS['scroll_token'] = response['scroll_token']
  else:
    continue_scrolling = False
    print(f"Unable to continue scrolling.")

  batch += 1
  found_all_records = (len(all_records) == response['total'])
  time.sleep(6) # avoid hitting rate limit thresholds
 
# Calculate time required to process batches
end_time = time.time()
runtime = end_time - start_time
        
print(f"Successfully recovered {len(all_records)} profiles in "
      f"{batch} batches [{round(runtime, 2)} seconds].")

# Save profiles to CSV (utility function)
def save_profiles_to_csv(profiles, filename, fields=[], delim=','):
  # Define header fields
  if fields == [] and len(profiles) > 0:
      fields = profiles[0].keys()
  # Write CSV file
  with open(filename, 'w') as csvfile:
    writer = csv.writer(csvfile, delimiter=delim)
    # Write header
    writer.writerow(fields)
    # Write body
    count = 0
    for profile in profiles:
      writer.writerow([ profile[field] for field in fields ])
      count += 1
  print(f"Wrote {count} lines to: '{filename}'.")

# Use utility function to save profiles to CSV   
csv_header_fields = ['name', 'website', 'linkedin_url',
                     'size', 'tags']
csv_filename = "all_company_profiles.csv"
save_profiles_to_csv(all_records, csv_filename, csv_header_fields)
import requests, json, time, csv

# Set your API key
API_KEY = "YOUR API KEY"

# Limit the number of records to pull (to prevent accidentally using 
# more credits than expected when testing out this code).
MAX_NUM_RECORDS_LIMIT = 150 # The maximum number of records to retrieve
USE_MAX_NUM_RECORDS_LIMIT = True # Set to False to pull all available records

# Set the Company Search API URL
PDL_URL = "https://api.peopledatalabs.com/v5/company/search"

# Set headers
HEADERS = {
  'Content-Type': "application/json",
  'X-api-key': API_KEY
}

# Create an SQL query
SQL_QUERY = \
f"""
  SELECT * FROM company
  WHERE industry = 'automotive'
  AND location.metro='detroit, michigan';
"""

# Create a parameters JSON object
PARAMS = {
  'sql': SQL_QUERY,
  'size': 100,
  'pretty': True
}

# Pull all results in multiple batches
batch = 1
# Store all records retrieved in an array
all_records = []
# Time the process
start_time = time.time()
found_all_records = False
continue_scrolling = True

# While still scrolling through data and still records to be found
while continue_scrolling and not found_all_records: 

  # Check if we reached the maximum number of records we want
  if USE_MAX_NUM_RECORDS_LIMIT:
    num_records_to_request = MAX_NUM_RECORDS_LIMIT - len(all_records)
    # Adjust size parameter
    PARAMS['size'] = max(0, min(100, num_records_to_request))
    # Check if MAX_NUM_RECORDS_LIMIT reached
    if num_records_to_request == 0:
      print(f"Stopping - reached maximum number of records to pull "
            f"[MAX_NUM_RECORDS_LIMIT = {MAX_NUM_RECORDS_LIMIT}].")
      break

  # Pass the parameters object to the Company Search API
  response = requests.get(
    PDL_URL,
    headers=HEADERS,
    params=PARAMS
  ).json()

  # Check for successful response
  if response['status'] == 200:
    # Add records retrieved to the records array
    all_records.extend(response['data'])
    print(f"Retrieved {len(response['data'])} records in batch {batch} "
          f"- {response['total'] - len(all_records)} records remaining.")
  else:
    print(f"Error retrieving some records:\n\t"
          f"[{response['status']} - {response['error']['type']}] "
          f"{response['error']['message']}")
  
  # Get scroll_token from response if exists and store it in parameters object
  if 'scroll_token' in response:
    PARAMS['scroll_token'] = response['scroll_token']
  else:
    continue_scrolling = False
    print(f"Unable to continue scrolling.")

  batch += 1
  found_all_records = (len(all_records) == response['total'])
  time.sleep(6) # avoid hitting rate limit thresholds
 
# Calculate time required to process batches
end_time = time.time()
runtime = end_time - start_time
        
print(f"Successfully recovered {len(all_records)} profiles in "
      f"{batch} batches [{round(runtime, 2)} seconds].")

# Save profiles to CSV (utility function)
def save_profiles_to_csv(profiles, filename, fields=[], delim=','):
  # Define header fields
  if fields == [] and len(profiles) > 0:
      fields = profiles[0].keys()
  # Write CSV file
  with open(filename, 'w') as csvfile:
    writer = csv.writer(csvfile, delimiter=delim)
    # Write header
    writer.writerow(fields)
    # Write body
    count = 0
    for profile in profiles:
      writer.writerow([ profile[field] for field in fields ])
      count += 1
  print(f"Wrote {count} lines to: '{filename}'.")

# Use utility function to save profiles to CSV    
csv_header_fields = ['name', 'website', 'linkedin_url',
                     'size', 'tags']
csv_filename = "all_company_profiles.csv"
save_profiles_to_csv(all_records, csv_filename, csv_header_fields)
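One caveat on the save_profiles_to_csv utility used throughout these examples: list-valued fields such as tags are written using their Python repr. If you would rather have a single delimited cell per list, here is a small sketch of one approach (the flatten helper below is hypothetical, not part of the SDK):

# Hypothetical helper: join list-valued fields (e.g. tags) into one CSV cell.
def flatten(value, sep='; '):
  # Leave scalars unchanged; join lists into a single delimited string.
  return sep.join(map(str, value)) if isinstance(value, list) else value

# Inside save_profiles_to_csv, the row write would then become:
# writer.writerow([flatten(profile[field]) for field in fields])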

Affiliate Lookup

(search by affiliated companies)

"I want to find all companies that are affiliated with Amazon (who's Company ID is hWBI7x4FvSurVNWDXD4uFgQt5ges)."

import json

# See https://github.com/peopledatalabs/peopledatalabs-python
from peopledatalabs import PDLPY

# Create a client, specifying your API key
CLIENT = PDLPY(
    api_key="YOUR API KEY",
)

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"term": {"affiliated_profiles": "hWBI7x4FvSurVNWDXD4uFgQt5ges"}}
      ]
    }
  }
}

# Create a parameters JSON object
PARAMS = {
    "query": ES_QUERY,
    "size": 100
}

# Pass the parameters object to the Company Search API
response = CLIENT.company.search(**PARAMS).json()

# Check for successful response
if response["status"] == 200:
  
  data = response['data']
  
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The eager beaver was not so eager. See error and try again.")
  print("error:", response)
import json

# See https://github.com/peopledatalabs/peopledatalabs-python
from peopledatalabs import PDLPY

# Create a client, specifying your API key
CLIENT = PDLPY(
    api_key="YOUR API KEY",
)

# Create an SQL query
SQL_QUERY = \
f"""
  SELECT * FROM company
  WHERE affiliated_profiles = 'hWBI7x4FvSurVNWDXD4uFgQt5ges';
"""

# Create a parameters JSON object
PARAMS = {
  'sql': SQL_QUERY,
  'size': 100
}

# Pass the parameters object to the Company Search API
response = CLIENT.company.search(**PARAMS).json()

# Check for successful response
if response["status"] == 200:
  
  data = response['data']
  
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")
  
  print(f"Successfully grabbed {len(data)} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The eager beaver was not so eager. See error and try again.")
  print("error:", response)
// See https://github.com/peopledatalabs/peopledatalabs-js
import PDLJS from 'peopledatalabs';

import fs from 'fs';

// Create a client, specifying your API key
const PDLJSClient = new PDLJS({ apiKey: "YOUR API KEY" });

// Create an Elasticsearch query
const esQuery = {
  query: {
    bool: {
      must:[
        {"term": {"affiliated_profiles": "hWBI7x4FvSurVNWDXD4uFgQt5ges"}}
      ]
    }
  }
}

// Create a parameters JSON object
const params = {
  searchQuery: esQuery, 
  size: 100,
}

// Pass the parameters object to the Company Search API
PDLJSClient.company.search.elastic(params).then((data) => {
    // Write out all profiles found to file
    fs.writeFile("my_pdl_search.jsonl", Buffer.from(JSON.stringify(data.data)), (err) => {
        if (err) throw err;
    });
    console.log(`Successfully grabbed ${data.data.length} records from PDL.`);
    console.log(`${data["total"]} total PDL records exist matching this query.`)
}).catch((error) => {
    console.log("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
    console.log(error);
});
// See https://github.com/peopledatalabs/peopledatalabs-js
import PDLJS from 'peopledatalabs';

import fs from 'fs';

// Create a client, specifying your API key
const PDLJSClient = new PDLJS({ apiKey: "YOUR API KEY" });

// Create an SQL query
const sqlQuery = `SELECT * FROM company
                    WHERE affiliated_profiles = 'hWBI7x4FvSurVNWDXD4uFgQt5ges';`;

// Create a parameters JSON object
const params = {
    searchQuery: sqlQuery, 
    size: 100
}

// Pass the parameters object to the Company Search API
PDLJSClient.company.search.sql(params).then((data) => {
    // Write out all profiles found to file
    fs.writeFile("my_pdl_search.jsonl", Buffer.from(JSON.stringify(data.data)), (err) => {
        if (err) throw err;
    });
    console.log(`Successfully grabbed ${data.data.length} records from PDL.`);
    console.log(`${data["total"]} total PDL records exist matching this query.`)
}).catch((error) => {
    console.log("NOTE: The carrier pigeons lost motivation in flight. See error and try again.")
    console.log(error);
});
require 'json'

# See https://github.com/peopledatalabs/peopledatalabs-ruby
require 'peopledatalabs'

# Set your API key
Peopledatalabs.api_key = 'YOUR API KEY'

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"term": {"affiliated_profiles": "hWBI7x4FvSurVNWDXD4uFgQt5ges"}}
      ]
    }
  }
}

# Pass parameters to the Company Search API
response = Peopledatalabs::Search.company(searchType: 'elastic', query: ES_QUERY, size: 100, pretty: true)

# Check for successful response
if response['status'] == 200
    data = response['data']
    # Write out each profile found to file
    File.open("my_pdl_search.jsonl", "w") do |out|
        data.each { |record| out.write(JSON.dump(record) + "\n") }
    end
    puts "Successfully grabbed #{data.length()} records from PDL."
    puts "#{response['total']} total PDL records exist matching this query."
else
    puts "NOTE: The carrier pigeons lost motivation in flight. See error and try again."
    puts "Error: #{response}"
end
require 'json'

# See https://github.com/peopledatalabs/peopledatalabs-ruby
require 'peopledatalabs'

# Set your API key
Peopledatalabs.api_key = 'YOUR API KEY'

# Create an SQL query
SQL_QUERY = <<~SQL
  SELECT * FROM company
  WHERE affiliated_profiles = 'hWBI7x4FvSurVNWDXD4uFgQt5ges';
SQL

# Pass parameters to the Company Search API
response = Peopledatalabs::Search.company(searchType: 'sql', query: SQL_QUERY, size: 100, pretty: true)

# Check for successful response
if response['status'] == 200
    data = response['data']
    # Write out each profile found to file
    File.open("my_pdl_search.jsonl", "w") do |out|
        data.each { |record| out.write(JSON.dump(record) + "\n") }
    end
    puts "Successfully grabbed #{data.length()} records from PDL."
    puts "#{response['total']} total PDL records exist matching this query."
else
    puts "NOTE: The carrier pigeons lost motivation in flight. See error and try again."
    puts "Error: #{response}"
end
package main

import(
    "fmt"
    "os"
    "encoding/json"
    "context"
)

// See https://github.com/peopledatalabs/peopledatalabs-go
import (
    pdl "github.com/peopledatalabs/peopledatalabs-go"
    pdlmodel "github.com/peopledatalabs/peopledatalabs-go/model"
)

func main() {
    // Set your API key
    apiKey := "YOUR API KEY"
    // Set API key as environmental variable
    // apiKey := os.Getenv("API_KEY")

    // Create a client, specifying your API key
    client := pdl.New(apiKey)

    // Create an Elasticsearch query
    elasticSearchQuery := map[string]interface{} {
        "query": map[string]interface{} {
            "bool": map[string]interface{} {
                "must": []map[string]interface{} {
                    {"term": map[string]interface{}{"affiliated_profiles": "hWBI7x4FvSurVNWDXD4uFgQt5ges"}},
                },
            },
        },
    }

    // Create a parameters JSON object
    params := pdlmodel.SearchParams {
        BaseParams: pdlmodel.BaseParams {
            Size: 100,
        },
        SearchBaseParams: pdlmodel.SearchBaseParams {
            Query: elasticSearchQuery,
        },
    }

    // Pass the parameters object to the Company Search API
    response, err := client.Company.Search(context.Background(), params)
    // Check for successful response
    if err == nil {
        data := response.Data
        // Create file
        out, outErr := os.Create("my_pdl_search.jsonl")
        if outErr == nil {
            defer out.Close()
            for i := range data {
                // Convert each profile found to JSON
                record, jsonErr := json.Marshal(data[i])
                // Write out each profile to file
                if (jsonErr == nil) {
                    out.WriteString(string(record) + "\n")
                }
            }
            out.Sync()
        }
        fmt.Printf("Successfully grabbed %d records from PDL.\n", len(data))
        fmt.Printf("%d total PDL records exist matching this query.\n", response.Total)
    } else {
        fmt.Println("NOTE: The eager beaver was not so eager. See error and try again.")
        fmt.Println("Error:", err)
    } 
}
package main

import(
    "fmt"
    "os"
    "encoding/json"
    "context"
)

// See https://github.com/peopledatalabs/peopledatalabs-go
import (
    pdl "github.com/peopledatalabs/peopledatalabs-go"
    pdlmodel "github.com/peopledatalabs/peopledatalabs-go/model"
)

func main() {
    // Set your API key
    apiKey := "YOUR API KEY"
    // Set API key as environmental variable
    // apiKey := os.Getenv("API_KEY")

    // Create a client, specifying your API key
    client := pdl.New(apiKey)

    // Create an SQL query
    sqlQuery := "SELECT * FROM company" +
        " WHERE affiliated_profiles = 'hWBI7x4FvSurVNWDXD4uFgQt5ges';"

    // Create a parameters JSON object
    params := pdlmodel.SearchParams {
        BaseParams: pdlmodel.BaseParams {
            Size: 100,
            Pretty: true,
        },
        SearchBaseParams: pdlmodel.SearchBaseParams {
            SQL: sqlQuery,
        },
    }

    // Pass the parameters object to the Company Search API
    response, err := client.Company.Search(context.Background(), params)
    // Check for successful response
    if err == nil {
        data := response.Data
        // Create file
        out, outErr := os.Create("my_pdl_search.jsonl")
        if outErr == nil {
            defer out.Close()
            for i := range data {
                // Convert each profile found to JSON
                record, jsonErr := json.Marshal(data[i])
                // Write out each profile to file
                if (jsonErr == nil) {
                    out.WriteString(string(record) + "\n")
                }
            }
            out.Sync()
        }
        fmt.Printf("Successfully grabbed %d records from PDL.\n", len(data))
        fmt.Printf("%d total PDL records exist matching this query.\n", response.Total)
    } else {
        fmt.Println("NOTE: The eager beaver was not so eager. See error and try again.")
        fmt.Println("Error:", err)
    } 
}
import requests, json

# Set your API key
API_KEY = "YOUR API KEY"

# Set the Company Search API URL
PDL_URL = "https://api.peopledatalabs.com/v5/company/search"

# Set headers
HEADERS = {
"Content-Type": "application/json",
"X-api-key": API_KEY
}

# Create an Elasticsearch query
ES_QUERY = {
  "query": {
    "bool": {
      "must": [
        {"term": {"affiliated_profiles": "hWBI7x4FvSurVNWDXD4uFgQt5ges"}}
      ]
    }
  }
}

# Create a parameters JSON object
PARAMS = {
    "query": json.dumps(ES_QUERY),
    "size": 100
}

# Pass the parameters object to the Company Search API
response = requests.get(
    PDL_URL,
    headers=HEADERS,
    params=PARAMS
).json()

# Check for successful response
if response["status"] == 200:
  data = response['data']
  
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")

  print(f"Successfully grabbed {len(response['data'])} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The eager beaver was not so eager. See error and try again.")
  print("error:", response)
import requests, json

# Set your API key
API_KEY = "YOUR API KEY"

# Set the Company Search API URL
PDL_URL = "https://api.peopledatalabs.com/v5/company/search"

# Set headers
HEADERS = {
"Content-Type": "application/json",
"X-api-key": API_KEY
}

# Create an SQL query
SQL_QUERY = \
f"""
  SELECT * FROM company
  WHERE affiliated_profiles = 'hWBI7x4FvSurVNWDXD4uFgQt5ges';
"""

# Create a parameters JSON object
PARAMS = {
  'sql': SQL_QUERY,
  'size': 100
}

# Pass the parameters object to the Company Search API
response = requests.get(
  PDL_URL,
  headers=HEADERS,
  params=PARAMS
).json()

# Check for successful response
if response["status"] == 200:
  
  data = response['data']
  
  # Write out each profile found to file
  with open("my_pdl_search.jsonl", "w") as out:
    for record in data:
      out.write(json.dumps(record) + "\n")

  print(f"Successfully grabbed {len(response['data'])} records from PDL.")
  print(f"{response['total']} total PDL records exist matching this query.")
else:
  print("NOTE: The eager beaver was not so eager. See error and try again.")
  print("error:", response)